From 162dad1cfbdb556a6d9d35d98e2c79d899adea9c Mon Sep 17 00:00:00 2001 From: "kaf24@firebug.cl.cam.ac.uk" Date: Thu, 28 Apr 2005 14:04:13 +0000 Subject: [PATCH] bitkeeper revision 1.1389.1.2 (4270ed5dZvr_HdIQR0eBM2m4Kj81_A) Renames: execution_context/xen_regs -> cpu_user_regs full_execution_context -> vcpu_guest_context [defined both 'struct xxx' and 'xxx_t' forms] Signed-off-by: Keir Fraser --- .../arch/xen/i386/kernel/smpboot.c | 20 +-- .../include/asm-xen/asm-i386/hypercall.h | 2 +- .../include/asm-xen/asm-x86_64/hypercall.h | 2 +- tools/libxc/xc.h | 2 +- tools/libxc/xc_domain.c | 2 +- tools/libxc/xc_linux_build.c | 24 +-- tools/libxc/xc_linux_restore.c | 8 +- tools/libxc/xc_linux_save.c | 10 +- tools/libxc/xc_plan9_build.c | 28 +-- tools/libxc/xc_ptrace.c | 14 +- tools/libxc/xc_vmx_build.c | 36 ++-- tools/xentrace/xenctx.c | 33 ++-- xen/arch/ia64/dom0_ops.c | 2 +- xen/arch/ia64/domain.c | 4 +- xen/arch/ia64/xenmisc.c | 2 +- xen/arch/x86/apic.c | 6 +- xen/arch/x86/cdb.c | 6 +- xen/arch/x86/dom0_ops.c | 18 +- xen/arch/x86/domain.c | 166 +++++++++--------- xen/arch/x86/extable.c | 2 +- xen/arch/x86/irq.c | 4 +- xen/arch/x86/mm.c | 2 +- xen/arch/x86/nmi.c | 2 +- xen/arch/x86/shadow.c | 2 +- xen/arch/x86/time.c | 2 +- xen/arch/x86/traps.c | 34 ++-- xen/arch/x86/vmx.c | 42 ++--- xen/arch/x86/vmx_intercept.c | 10 +- xen/arch/x86/vmx_io.c | 36 ++-- xen/arch/x86/vmx_platform.c | 14 +- xen/arch/x86/vmx_vmcs.c | 48 ++--- xen/arch/x86/x86_32/asm-offsets.c | 40 ++--- xen/arch/x86/x86_32/call_with_regs.S | 52 +++--- xen/arch/x86/x86_32/entry.S | 150 ++++++++-------- xen/arch/x86/x86_32/seg_fixup.c | 2 +- xen/arch/x86/x86_32/traps.c | 13 +- xen/arch/x86/x86_64/asm-offsets.c | 48 ++--- xen/arch/x86/x86_64/entry.S | 58 +++--- xen/arch/x86/x86_64/mm.c | 6 +- xen/arch/x86/x86_64/traps.c | 11 +- xen/arch/x86/x86_emulate.c | 6 +- xen/common/dom0_ops.c | 6 +- xen/common/domain.c | 10 +- xen/common/keyhandler.c | 17 +- xen/drivers/char/console.c | 4 +- xen/drivers/char/serial.c | 4 
+- xen/include/asm-ia64/debugger.h | 4 +- xen/include/asm-ia64/domain.h | 2 +- xen/include/asm-ia64/regs.h | 2 +- xen/include/asm-x86/apic.h | 4 +- xen/include/asm-x86/debugger.h | 12 +- xen/include/asm-x86/domain.h | 2 +- xen/include/asm-x86/processor.h | 10 +- xen/include/asm-x86/shadow.h | 2 +- xen/include/asm-x86/vmx.h | 2 +- xen/include/asm-x86/vmx_platform.h | 4 +- xen/include/asm-x86/vmx_vmcs.h | 4 +- xen/include/asm-x86/x86_32/asm_defns.h | 26 +-- xen/include/asm-x86/x86_32/current.h | 12 +- xen/include/asm-x86/x86_32/regs.h | 2 +- xen/include/asm-x86/x86_64/asm_defns.h | 2 +- xen/include/asm-x86/x86_64/current.h | 12 +- xen/include/asm-x86/x86_64/regs.h | 2 +- xen/include/asm-x86/x86_emulate.h | 6 +- xen/include/public/arch-ia64.h | 6 +- xen/include/public/arch-x86_32.h | 13 +- xen/include/public/arch-x86_64.h | 13 +- xen/include/public/dom0_ops.h | 4 +- xen/include/xen/domain.h | 2 +- xen/include/xen/irq.h | 4 +- xen/include/xen/keyhandler.h | 4 +- xen/include/xen/serial.h | 2 +- 72 files changed, 586 insertions(+), 582 deletions(-) diff --git a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c index 117ca92150..2dbf547755 100644 --- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c +++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c @@ -820,7 +820,7 @@ static int __init do_boot_cpu(int apicid) #if 0 unsigned short nmi_high = 0, nmi_low = 0; #endif - full_execution_context_t ctxt; + vcpu_guest_context_t ctxt; extern void startup_32_smp(void); extern void hypervisor_callback(void); extern void failsafe_callback(void); @@ -865,15 +865,15 @@ static int __init do_boot_cpu(int apicid) memset(&ctxt, 0, sizeof(ctxt)); - ctxt.cpu_ctxt.ds = __USER_DS; - ctxt.cpu_ctxt.es = __USER_DS; - ctxt.cpu_ctxt.fs = 0; - ctxt.cpu_ctxt.gs = 0; - ctxt.cpu_ctxt.ss = __KERNEL_DS; - ctxt.cpu_ctxt.cs = __KERNEL_CS; - ctxt.cpu_ctxt.eip = start_eip; - ctxt.cpu_ctxt.esp = idle->thread.esp; - 
ctxt.cpu_ctxt.eflags = (1<<9) | (1<<2) | (idle->thread.io_pl<<12); + ctxt.user_regs.ds = __USER_DS; + ctxt.user_regs.es = __USER_DS; + ctxt.user_regs.fs = 0; + ctxt.user_regs.gs = 0; + ctxt.user_regs.ss = __KERNEL_DS; + ctxt.user_regs.cs = __KERNEL_CS; + ctxt.user_regs.eip = start_eip; + ctxt.user_regs.esp = idle->thread.esp; + ctxt.user_regs.eflags = (1<<9) | (1<<2) | (idle->thread.io_pl<<12); /* FPU is set up to default initial state. */ memset(ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt)); diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h index 4dff4bbd72..187d44f874 100644 --- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h +++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/hypercall.h @@ -519,7 +519,7 @@ HYPERVISOR_vm_assist( static inline int HYPERVISOR_boot_vcpu( - unsigned long vcpu, full_execution_context_t *ctxt) + unsigned long vcpu, vcpu_guest_context_t *ctxt) { int ret; unsigned long ign1, ign2; diff --git a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h index ff8fdcafb8..2bfb9e7262 100644 --- a/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h +++ b/linux-2.6.11-xen-sparse/include/asm-xen/asm-x86_64/hypercall.h @@ -489,7 +489,7 @@ HYPERVISOR_switch_to_user(void) static inline int HYPERVISOR_boot_vcpu( - unsigned long vcpu, full_execution_context_t *ctxt) + unsigned long vcpu, vcpu_guest_context_t *ctxt) { int ret; diff --git a/tools/libxc/xc.h b/tools/libxc/xc.h index 72c6e10876..9eac0a7a18 100644 --- a/tools/libxc/xc.h +++ b/tools/libxc/xc.h @@ -160,7 +160,7 @@ int xc_domain_getfullinfo(int xc_handle, u32 domid, u32 vcpu, xc_domaininfo_t *info, - full_execution_context_t *ctxt); + vcpu_guest_context_t *ctxt); int xc_domain_setcpuweight(int xc_handle, u32 domid, float weight); diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c index 
1d8815a8a6..1441c38546 100644 --- a/tools/libxc/xc_domain.c +++ b/tools/libxc/xc_domain.c @@ -144,7 +144,7 @@ int xc_domain_getfullinfo(int xc_handle, u32 domid, u32 vcpu, xc_domaininfo_t *info, - full_execution_context_t *ctxt) + vcpu_guest_context_t *ctxt) { int rc, errno_saved; dom0_op_t op; diff --git a/tools/libxc/xc_linux_build.c b/tools/libxc/xc_linux_build.c index d4b28de06e..1691415e63 100644 --- a/tools/libxc/xc_linux_build.c +++ b/tools/libxc/xc_linux_build.c @@ -45,7 +45,7 @@ static int setup_guest(int xc_handle, gzFile initrd_gfd, unsigned long initrd_len, unsigned long nr_pages, unsigned long *pvsi, unsigned long *pvke, - full_execution_context_t *ctxt, + vcpu_guest_context_t *ctxt, const char *cmdline, unsigned long shared_info_frame, unsigned int control_evtchn, @@ -316,7 +316,7 @@ int xc_linux_build(int xc_handle, int initrd_fd = -1; gzFile initrd_gfd = NULL; int rc, i; - full_execution_context_t st_ctxt, *ctxt = &st_ctxt; + vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt; unsigned long nr_pages; char *image = NULL; unsigned long image_size, initrd_size=0; @@ -400,16 +400,16 @@ int xc_linux_build(int xc_handle, * [EAX,EBX,ECX,EDX,EDI,EBP are zero] * EFLAGS = IF | 2 (bit 1 is reserved and should always be 1) */ - ctxt->cpu_ctxt.ds = FLAT_KERNEL_DS; - ctxt->cpu_ctxt.es = FLAT_KERNEL_DS; - ctxt->cpu_ctxt.fs = FLAT_KERNEL_DS; - ctxt->cpu_ctxt.gs = FLAT_KERNEL_DS; - ctxt->cpu_ctxt.ss = FLAT_KERNEL_DS; - ctxt->cpu_ctxt.cs = FLAT_KERNEL_CS; - ctxt->cpu_ctxt.eip = vkern_entry; - ctxt->cpu_ctxt.esp = vstartinfo_start + 2*PAGE_SIZE; - ctxt->cpu_ctxt.esi = vstartinfo_start; - ctxt->cpu_ctxt.eflags = (1<<9) | (1<<2); + ctxt->user_regs.ds = FLAT_KERNEL_DS; + ctxt->user_regs.es = FLAT_KERNEL_DS; + ctxt->user_regs.fs = FLAT_KERNEL_DS; + ctxt->user_regs.gs = FLAT_KERNEL_DS; + ctxt->user_regs.ss = FLAT_KERNEL_DS; + ctxt->user_regs.cs = FLAT_KERNEL_CS; + ctxt->user_regs.eip = vkern_entry; + ctxt->user_regs.esp = vstartinfo_start + 2*PAGE_SIZE; + ctxt->user_regs.esi = 
vstartinfo_start; + ctxt->user_regs.eflags = (1<<9) | (1<<2); /* FPU is set up to default initial state. */ memset(ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt)); diff --git a/tools/libxc/xc_linux_restore.c b/tools/libxc/xc_linux_restore.c index 30b6280414..3d3f4c1e18 100644 --- a/tools/libxc/xc_linux_restore.c +++ b/tools/libxc/xc_linux_restore.c @@ -73,7 +73,7 @@ int xc_linux_restore(int xc_handle, XcIOContext *ioctxt) shared_info_t *shared_info = (shared_info_t *)shared_info_page; /* A copy of the CPU context of the guest. */ - full_execution_context_t ctxt; + vcpu_guest_context_t ctxt; /* First 16 bytes of the state file must contain 'LinuxGuestRecord'. */ char signature[16]; @@ -505,13 +505,13 @@ int xc_linux_restore(int xc_handle, XcIOContext *ioctxt) } /* Uncanonicalise the suspend-record frame number and poke resume rec. */ - pfn = ctxt.cpu_ctxt.esi; + pfn = ctxt.user_regs.esi; if ( (pfn >= nr_pfns) || (pfn_type[pfn] != NOTAB) ) { xcio_error(ioctxt, "Suspend record frame number is bad"); goto out; } - ctxt.cpu_ctxt.esi = mfn = pfn_to_mfn_table[pfn]; + ctxt.user_regs.esi = mfn = pfn_to_mfn_table[pfn]; p_srec = xc_map_foreign_range( xc_handle, dom, PAGE_SIZE, PROT_WRITE, mfn); p_srec->resume_info.nr_pages = nr_pfns; @@ -599,7 +599,7 @@ int xc_linux_restore(int xc_handle, XcIOContext *ioctxt) /* * Safety checking of saved context: - * 1. cpu_ctxt is fine, as Xen checks that on context switch. + * 1. user_regs is fine, as Xen checks that on context switch. * 2. fpu_ctxt is fine, as it can't hurt Xen. * 3. trap_ctxt needs the code selectors checked. * 4. fast_trap_idx is checked by Xen. 
diff --git a/tools/libxc/xc_linux_save.c b/tools/libxc/xc_linux_save.c index 8600e0fc59..f8a28cf43f 100644 --- a/tools/libxc/xc_linux_save.c +++ b/tools/libxc/xc_linux_save.c @@ -325,7 +325,7 @@ static int analysis_phase( int xc_handle, u32 domid, int suspend_and_state(int xc_handle, XcIOContext *ioctxt, xc_domaininfo_t *info, - full_execution_context_t *ctxt) + vcpu_guest_context_t *ctxt) { int i=0; @@ -391,7 +391,7 @@ int xc_linux_save(int xc_handle, XcIOContext *ioctxt) unsigned long shared_info_frame; /* A copy of the CPU context of the guest. */ - full_execution_context_t ctxt; + vcpu_guest_context_t ctxt; /* A table containg the type of each PFN (/not/ MFN!). */ unsigned long *pfn_type = NULL; @@ -922,7 +922,7 @@ int xc_linux_save(int xc_handle, XcIOContext *ioctxt) "SUSPEND flags %08u shinfo %08lx eip %08u " "esi %08u\n",info.flags, info.shared_info_frame, - ctxt.cpu_ctxt.eip, ctxt.cpu_ctxt.esi ); + ctxt.user_regs.eip, ctxt.user_regs.esi ); } if ( xc_shadow_control( xc_handle, domid, @@ -995,7 +995,7 @@ int xc_linux_save(int xc_handle, XcIOContext *ioctxt) domid for this to succeed. */ p_srec = xc_map_foreign_range(xc_handle, domid, sizeof(*p_srec), PROT_READ, - ctxt.cpu_ctxt.esi); + ctxt.user_regs.esi); if (!p_srec){ xcio_error(ioctxt, "Couldn't map suspend record"); goto out; @@ -1009,7 +1009,7 @@ int xc_linux_save(int xc_handle, XcIOContext *ioctxt) } /* Canonicalise the suspend-record frame number. 
*/ - if ( !translate_mfn_to_pfn(&ctxt.cpu_ctxt.esi) ){ + if ( !translate_mfn_to_pfn(&ctxt.user_regs.esi) ){ xcio_error(ioctxt, "Suspend record is not in range of pseudophys map"); goto out; } diff --git a/tools/libxc/xc_plan9_build.c b/tools/libxc/xc_plan9_build.c index 3476136196..bb7356dc72 100644 --- a/tools/libxc/xc_plan9_build.c +++ b/tools/libxc/xc_plan9_build.c @@ -113,7 +113,7 @@ setup_guest(int xc_handle, unsigned long tot_pages, unsigned long *virt_startinfo_addr, unsigned long *virt_load_addr, - full_execution_context_t * ctxt, + vcpu_guest_context_t * ctxt, const char *cmdline, unsigned long shared_info_frame, unsigned int control_evtchn, @@ -411,7 +411,7 @@ xc_plan9_build(int xc_handle, int kernel_fd = -1; gzFile kernel_gfd = NULL; int rc, i; - full_execution_context_t st_ctxt, *ctxt = &st_ctxt; + vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt; unsigned long virt_startinfo_addr; if ((tot_pages = xc_get_tot_pages(xc_handle, domid)) < 0) { @@ -482,20 +482,20 @@ xc_plan9_build(int xc_handle, * [EAX,EBX,ECX,EDX,EDI,EBP are zero] * EFLAGS = IF | 2 (bit 1 is reserved and should always be 1) */ - ctxt->cpu_ctxt.ds = FLAT_KERNEL_DS; - ctxt->cpu_ctxt.es = FLAT_KERNEL_DS; - ctxt->cpu_ctxt.fs = FLAT_KERNEL_DS; - ctxt->cpu_ctxt.gs = FLAT_KERNEL_DS; - ctxt->cpu_ctxt.ss = FLAT_KERNEL_DS; - ctxt->cpu_ctxt.cs = FLAT_KERNEL_CS; - ctxt->cpu_ctxt.eip = load_addr; - ctxt->cpu_ctxt.eip = 0x80100020; + ctxt->user_regs.ds = FLAT_KERNEL_DS; + ctxt->user_regs.es = FLAT_KERNEL_DS; + ctxt->user_regs.fs = FLAT_KERNEL_DS; + ctxt->user_regs.gs = FLAT_KERNEL_DS; + ctxt->user_regs.ss = FLAT_KERNEL_DS; + ctxt->user_regs.cs = FLAT_KERNEL_CS; + ctxt->user_regs.eip = load_addr; + ctxt->user_regs.eip = 0x80100020; /* put stack at top of second page */ - ctxt->cpu_ctxt.esp = 0x80000000 + (STACKPAGE << PAGE_SHIFT); + ctxt->user_regs.esp = 0x80000000 + (STACKPAGE << PAGE_SHIFT); /* why is this set? 
*/ - ctxt->cpu_ctxt.esi = ctxt->cpu_ctxt.esp; - ctxt->cpu_ctxt.eflags = (1 << 9) | (1 << 2); + ctxt->user_regs.esi = ctxt->user_regs.esp; + ctxt->user_regs.eflags = (1 << 9) | (1 << 2); /* FPU is set up to default initial state. */ memset(ctxt->fpu_ctxt, 0, sizeof (ctxt->fpu_ctxt)); @@ -519,7 +519,7 @@ xc_plan9_build(int xc_handle, /* Ring 1 stack is the initial stack. */ /* put stack at top of second page */ ctxt->kernel_ss = FLAT_KERNEL_DS; - ctxt->kernel_esp = ctxt->cpu_ctxt.esp; + ctxt->kernel_esp = ctxt->user_regs.esp; /* No debugging. */ memset(ctxt->debugreg, 0, sizeof (ctxt->debugreg)); diff --git a/tools/libxc/xc_ptrace.c b/tools/libxc/xc_ptrace.c index b7e6e89562..4c79d497a7 100644 --- a/tools/libxc/xc_ptrace.c +++ b/tools/libxc/xc_ptrace.c @@ -132,7 +132,7 @@ static long nr_pages = 0; unsigned long *page_array = NULL; static int regs_valid[MAX_VIRT_CPUS]; static unsigned long cr3[MAX_VIRT_CPUS]; -static full_execution_context_t ctxt[MAX_VIRT_CPUS]; +static vcpu_guest_context_t ctxt[MAX_VIRT_CPUS]; /* --------------------- */ @@ -220,7 +220,7 @@ waitdomain(int domain, int *status, int options) { dom0_op_t op; int retval; - full_execution_context_t ctxt; + vcpu_guest_context_t ctxt; struct timespec ts; ts.tv_sec = 0; ts.tv_nsec = 10*1000*1000; @@ -300,7 +300,7 @@ xc_ptrace(enum __ptrace_request request, pid_t domid, void *addr, void *data) FETCH_REGS(cpu); if (request == PTRACE_GETREGS) { - SET_PT_REGS(pt, ctxt[cpu].cpu_ctxt); + SET_PT_REGS(pt, ctxt[cpu].user_regs); memcpy(data, &pt, sizeof(elf_gregset_t)); } else if (request == PTRACE_GETFPREGS) memcpy(data, &ctxt[cpu].fpu_ctxt, sizeof(ctxt[cpu].fpu_ctxt)); @@ -309,7 +309,7 @@ xc_ptrace(enum __ptrace_request request, pid_t domid, void *addr, void *data) break; case PTRACE_SETREGS: op.cmd = DOM0_SETDOMAININFO; - SET_XC_REGS(((struct gdb_regs *)data), ctxt[VCPU].cpu_ctxt); + SET_XC_REGS(((struct gdb_regs *)data), ctxt[VCPU].user_regs); op.u.setdomaininfo.domain = domid; /* XXX need to understand multiple 
exec_domains */ op.u.setdomaininfo.exec_domain = cpu; @@ -339,7 +339,7 @@ xc_ptrace(enum __ptrace_request request, pid_t domid, void *addr, void *data) retval = do_dom0_op(xc_handle, &op); break; case PTRACE_SINGLESTEP: - ctxt[VCPU].cpu_ctxt.eflags |= PSL_T; + ctxt[VCPU].user_regs.eflags |= PSL_T; op.cmd = DOM0_SETDOMAININFO; op.u.setdomaininfo.domain = domid; op.u.setdomaininfo.exec_domain = 0; @@ -355,8 +355,8 @@ xc_ptrace(enum __ptrace_request request, pid_t domid, void *addr, void *data) if (request != PTRACE_SINGLESTEP) { FETCH_REGS(cpu); /* Clear trace flag */ - if (ctxt[cpu].cpu_ctxt.eflags & PSL_T) { - ctxt[cpu].cpu_ctxt.eflags &= ~PSL_T; + if (ctxt[cpu].user_regs.eflags & PSL_T) { + ctxt[cpu].user_regs.eflags &= ~PSL_T; op.cmd = DOM0_SETDOMAININFO; op.u.setdomaininfo.domain = domid; op.u.setdomaininfo.exec_domain = cpu; diff --git a/tools/libxc/xc_vmx_build.c b/tools/libxc/xc_vmx_build.c index dcd4f574f5..786f7a3cb6 100644 --- a/tools/libxc/xc_vmx_build.c +++ b/tools/libxc/xc_vmx_build.c @@ -149,7 +149,7 @@ static int setup_guest(int xc_handle, char *image, unsigned long image_size, gzFile initrd_gfd, unsigned long initrd_len, unsigned long nr_pages, - full_execution_context_t *ctxt, + vcpu_guest_context_t *ctxt, const char *cmdline, unsigned long shared_info_frame, unsigned int control_evtchn, @@ -422,22 +422,22 @@ static int setup_guest(int xc_handle, /* * Initial register values: */ - ctxt->cpu_ctxt.ds = 0x68; - ctxt->cpu_ctxt.es = 0x0; - ctxt->cpu_ctxt.fs = 0x0; - ctxt->cpu_ctxt.gs = 0x0; - ctxt->cpu_ctxt.ss = 0x68; - ctxt->cpu_ctxt.cs = 0x60; - ctxt->cpu_ctxt.eip = dsi.v_kernentry; - ctxt->cpu_ctxt.edx = vboot_gdt_start; - ctxt->cpu_ctxt.eax = 0x800; - ctxt->cpu_ctxt.esp = vboot_gdt_end; - ctxt->cpu_ctxt.ebx = 0; /* startup_32 expects this to be 0 to signal boot cpu */ - ctxt->cpu_ctxt.ecx = mem_mapp->nr_map; - ctxt->cpu_ctxt.esi = vboot_params_start; - ctxt->cpu_ctxt.edi = vboot_params_start + 0x2d0; - - ctxt->cpu_ctxt.eflags = (1<<2); + 
ctxt->user_regs.ds = 0x68; + ctxt->user_regs.es = 0x0; + ctxt->user_regs.fs = 0x0; + ctxt->user_regs.gs = 0x0; + ctxt->user_regs.ss = 0x68; + ctxt->user_regs.cs = 0x60; + ctxt->user_regs.eip = dsi.v_kernentry; + ctxt->user_regs.edx = vboot_gdt_start; + ctxt->user_regs.eax = 0x800; + ctxt->user_regs.esp = vboot_gdt_end; + ctxt->user_regs.ebx = 0; /* startup_32 expects this to be 0 to signal boot cpu */ + ctxt->user_regs.ecx = mem_mapp->nr_map; + ctxt->user_regs.esi = vboot_params_start; + ctxt->user_regs.edi = vboot_params_start + 0x2d0; + + ctxt->user_regs.eflags = (1<<2); return 0; @@ -488,7 +488,7 @@ int xc_vmx_build(int xc_handle, int initrd_fd = -1; gzFile initrd_gfd = NULL; int rc, i; - full_execution_context_t st_ctxt, *ctxt = &st_ctxt; + vcpu_guest_context_t st_ctxt, *ctxt = &st_ctxt; unsigned long nr_pages; char *image = NULL; unsigned long image_size, initrd_size=0; diff --git a/tools/xentrace/xenctx.c b/tools/xentrace/xenctx.c index 4a65d53169..28dfd360ac 100644 --- a/tools/xentrace/xenctx.c +++ b/tools/xentrace/xenctx.c @@ -24,27 +24,26 @@ #include "xc.h" #ifdef __i386__ -void -print_ctx(full_execution_context_t *ctx1) +void print_ctx(vcpu_guest_context_t *ctx1) { - execution_context_t *ctx = &ctx1->cpu_ctxt; + struct cpu_user_regs *regs = &ctx1->user_regs; - printf("eip: %08lx\t", ctx->eip); - printf("esp: %08lx\n", ctx->esp); + printf("eip: %08lx\t", regs->eip); + printf("esp: %08lx\n", regs->esp); - printf("eax: %08lx\t", ctx->eax); - printf("ebx: %08lx\t", ctx->ebx); - printf("ecx: %08lx\t", ctx->ecx); - printf("edx: %08lx\n", ctx->edx); + printf("eax: %08lx\t", regs->eax); + printf("ebx: %08lx\t", regs->ebx); + printf("ecx: %08lx\t", regs->ecx); + printf("edx: %08lx\n", regs->edx); - printf("esi: %08lx\t", ctx->esi); - printf("edi: %08lx\t", ctx->edi); - printf("ebp: %08lx\n", ctx->ebp); + printf("esi: %08lx\t", regs->esi); + printf("edi: %08lx\t", regs->edi); + printf("ebp: %08lx\n", regs->ebp); - printf(" cs: %08lx\t", ctx->cs); - printf(" ds: 
%08lx\t", ctx->ds); - printf(" fs: %08lx\t", ctx->fs); - printf(" gs: %08lx\n", ctx->gs); + printf(" cs: %08lx\t", regs->cs); + printf(" ds: %08lx\t", regs->ds); + printf(" fs: %08lx\t", regs->fs); + printf(" gs: %08lx\n", regs->gs); } #endif @@ -53,7 +52,7 @@ void dump_ctx(u32 domid, u32 vcpu) { int ret; xc_domaininfo_t info; - full_execution_context_t ctx; + vcpu_guest_context_t ctx; int xc_handle = xc_interface_open(); /* for accessing control interface */ diff --git a/xen/arch/ia64/dom0_ops.c b/xen/arch/ia64/dom0_ops.c index dc8fa16424..daaa87445c 100644 --- a/xen/arch/ia64/dom0_ops.c +++ b/xen/arch/ia64/dom0_ops.c @@ -47,7 +47,7 @@ long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op) return ret; } -void arch_getdomaininfo_ctxt(struct domain *d, full_execution_context_t *c) +void arch_getdomaininfo_ctxt(struct domain *d, struct vcpu_guest_context *c) { int i; diff --git a/xen/arch/ia64/domain.c b/xen/arch/ia64/domain.c index 7ac2df3cd4..056e9a711b 100644 --- a/xen/arch/ia64/domain.c +++ b/xen/arch/ia64/domain.c @@ -199,13 +199,13 @@ void arch_do_boot_vcpu(struct exec_domain *p) return; } -int arch_set_info_guest(struct exec_domain *p, full_execution_context_t *c) +int arch_set_info_guest(struct exec_domain *p, struct vcpu_guest_context *c) { dummy(); return 1; } -int arch_final_setup_guest(struct exec_domain *p, full_execution_context_t *c) +int arch_final_setup_guest(struct exec_domain *p, struct vcpu_guest_context *c) { dummy(); return 1; diff --git a/xen/arch/ia64/xenmisc.c b/xen/arch/ia64/xenmisc.c index 382def35c1..d740fe7aa9 100644 --- a/xen/arch/ia64/xenmisc.c +++ b/xen/arch/ia64/xenmisc.c @@ -66,7 +66,7 @@ void grant_table_destroy(struct domain *d) return; } -struct pt_regs *get_execution_context(void) { return ia64_task_regs(current); } +struct pt_regs *get_cpu_user_regs(void) { return ia64_task_regs(current); } void raise_actimer_softirq(void) { diff --git a/xen/arch/x86/apic.c b/xen/arch/x86/apic.c index f3b5c64f77..766a10ed7c 100644 --- 
a/xen/arch/x86/apic.c +++ b/xen/arch/x86/apic.c @@ -825,7 +825,7 @@ int reprogram_ac_timer(s_time_t timeout) return 1; } -void smp_apic_timer_interrupt(struct xen_regs * regs) +void smp_apic_timer_interrupt(struct cpu_user_regs * regs) { ack_APIC_irq(); perfc_incrc(apic_timer); @@ -835,7 +835,7 @@ void smp_apic_timer_interrupt(struct xen_regs * regs) /* * This interrupt should _never_ happen with our APIC/SMP architecture */ -asmlinkage void smp_spurious_interrupt(struct xen_regs *regs) +asmlinkage void smp_spurious_interrupt(struct cpu_user_regs *regs) { unsigned long v; @@ -857,7 +857,7 @@ asmlinkage void smp_spurious_interrupt(struct xen_regs *regs) * This interrupt should never happen with our APIC/SMP architecture */ -asmlinkage void smp_error_interrupt(struct xen_regs *regs) +asmlinkage void smp_error_interrupt(struct cpu_user_regs *regs) { unsigned long v, v1; diff --git a/xen/arch/x86/cdb.c b/xen/arch/x86/cdb.c index 458e2d64ed..6eb3515ec2 100644 --- a/xen/arch/x86/cdb.c +++ b/xen/arch/x86/cdb.c @@ -214,7 +214,7 @@ xendbg_send_reply(const char *buf, struct xendbg_context *ctx) } static int -handle_register_read_command(struct xen_regs *regs, struct xendbg_context *ctx) +handle_register_read_command(struct cpu_user_regs *regs, struct xendbg_context *ctx) { char buf[121]; @@ -240,7 +240,7 @@ handle_register_read_command(struct xen_regs *regs, struct xendbg_context *ctx) } static int -process_command(char *received_packet, struct xen_regs *regs, +process_command(char *received_packet, struct cpu_user_regs *regs, struct xendbg_context *ctx) { char *ptr; @@ -318,7 +318,7 @@ xdb_ctx = { }; int -__trap_to_cdb(struct xen_regs *regs) +__trap_to_cdb(struct cpu_user_regs *regs) { int resume = 0; int r; diff --git a/xen/arch/x86/dom0_ops.c b/xen/arch/x86/dom0_ops.c index 35ea082e28..a59b118d22 100644 --- a/xen/arch/x86/dom0_ops.c +++ b/xen/arch/x86/dom0_ops.c @@ -374,33 +374,33 @@ long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op) } void arch_getdomaininfo_ctxt( 
- struct exec_domain *ed, full_execution_context_t *c) + struct exec_domain *ed, struct vcpu_guest_context *c) { int i; #ifdef __i386__ /* Remove when x86_64 VMX is implemented */ #ifdef CONFIG_VMX - extern void save_vmx_execution_context(execution_context_t *); + extern void save_vmx_cpu_user_regs(struct cpu_user_regs *); #endif #endif c->flags = 0; - memcpy(&c->cpu_ctxt, - &ed->arch.user_ctxt, - sizeof(ed->arch.user_ctxt)); + memcpy(&c->user_regs, + &ed->arch.user_regs, + sizeof(ed->arch.user_regs)); /* IOPL privileges are virtualised -- merge back into returned eflags. */ - BUG_ON((c->cpu_ctxt.eflags & EF_IOPL) != 0); - c->cpu_ctxt.eflags |= ed->arch.iopl << 12; + BUG_ON((c->user_regs.eflags & EF_IOPL) != 0); + c->user_regs.eflags |= ed->arch.iopl << 12; #ifdef __i386__ #ifdef CONFIG_VMX if ( VMX_DOMAIN(ed) ) - save_vmx_execution_context(&c->cpu_ctxt); + save_vmx_cpu_user_regs(&c->user_regs); #endif #endif if ( test_bit(EDF_DONEFPUINIT, &ed->ed_flags) ) c->flags |= ECF_I387_VALID; - if ( KERNEL_MODE(ed, &ed->arch.user_ctxt) ) + if ( KERNEL_MODE(ed, &ed->arch.user_regs) ) c->flags |= ECF_IN_KERNEL; #ifdef CONFIG_VMX if (VMX_DOMAIN(ed)) diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index a5029a45d5..9a47fcaac8 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -312,14 +312,14 @@ void arch_vmx_do_launch(struct exec_domain *ed) reset_stack_and_jump(vmx_asm_do_launch); } -static int vmx_final_setup_guest(struct exec_domain *ed, - full_execution_context_t *full_context) +static int vmx_final_setup_guest( + struct exec_domain *ed, struct vcpu_guest_context *ctxt) { int error; - execution_context_t *context; + struct cpu_user_regs *regs; struct vmcs_struct *vmcs; - context = &full_context->cpu_ctxt; + regs = &ctxt->user_regs; /* * Create a new VMCS @@ -333,7 +333,7 @@ static int vmx_final_setup_guest(struct exec_domain *ed, ed->arch.arch_vmx.vmcs = vmcs; error = construct_vmcs( - &ed->arch.arch_vmx, context, full_context, VMCS_USE_HOST_ENV); 
+ &ed->arch.arch_vmx, regs, ctxt, VMCS_USE_HOST_ENV); if ( error < 0 ) { printk("Failed to construct a new VMCS\n"); @@ -345,7 +345,7 @@ static int vmx_final_setup_guest(struct exec_domain *ed, #if defined (__i386) ed->arch.arch_vmx.vmx_platform.real_mode_data = - (unsigned long *) context->esi; + (unsigned long *) regs->esi; #endif if (ed == ed->domain->exec_domain[0]) { @@ -374,7 +374,7 @@ out: /* This is called by arch_final_setup_guest and do_boot_vcpu */ int arch_set_info_guest( - struct exec_domain *ed, full_execution_context_t *c) + struct exec_domain *ed, struct vcpu_guest_context *c) { struct domain *d = ed->domain; unsigned long phys_basetab; @@ -386,8 +386,8 @@ int arch_set_info_guest( * If SS RPL or DPL differs from CS RPL then we'll #GP. */ if (!(c->flags & ECF_VMX_GUEST)) - if ( ((c->cpu_ctxt.cs & 3) == 0) || - ((c->cpu_ctxt.ss & 3) == 0) ) + if ( ((c->user_regs.cs & 3) == 0) || + ((c->user_regs.ss & 3) == 0) ) return -EINVAL; clear_bit(EDF_DONEFPUINIT, &ed->ed_flags); @@ -398,21 +398,21 @@ int arch_set_info_guest( if ( c->flags & ECF_IN_KERNEL ) ed->arch.flags |= TF_kernel_mode; - memcpy(&ed->arch.user_ctxt, - &c->cpu_ctxt, - sizeof(ed->arch.user_ctxt)); + memcpy(&ed->arch.user_regs, + &c->user_regs, + sizeof(ed->arch.user_regs)); memcpy(&ed->arch.i387, &c->fpu_ctxt, sizeof(ed->arch.i387)); /* IOPL privileges are virtualised. */ - ed->arch.iopl = (ed->arch.user_ctxt.eflags >> 12) & 3; - ed->arch.user_ctxt.eflags &= ~EF_IOPL; + ed->arch.iopl = (ed->arch.user_regs.eflags >> 12) & 3; + ed->arch.user_regs.eflags &= ~EF_IOPL; /* Clear IOPL for unprivileged domains. 
*/ if (!IS_PRIV(d)) - ed->arch.user_ctxt.eflags &= 0xffffcfff; + ed->arch.user_regs.eflags &= 0xffffcfff; if (test_bit(EDF_DONEINIT, &ed->ed_flags)) return 0; @@ -507,7 +507,7 @@ void new_thread(struct exec_domain *d, unsigned long start_stack, unsigned long start_info) { - execution_context_t *ec = &d->arch.user_ctxt; + struct cpu_user_regs *regs = &d->arch.user_regs; /* * Initial register values: @@ -517,15 +517,15 @@ void new_thread(struct exec_domain *d, * ESI = start_info * [EAX,EBX,ECX,EDX,EDI,EBP are zero] */ - ec->ds = ec->es = ec->fs = ec->gs = FLAT_KERNEL_DS; - ec->ss = FLAT_KERNEL_SS; - ec->cs = FLAT_KERNEL_CS; - ec->eip = start_pc; - ec->esp = start_stack; - ec->esi = start_info; - - __save_flags(ec->eflags); - ec->eflags |= X86_EFLAGS_IF; + regs->ds = regs->es = regs->fs = regs->gs = FLAT_KERNEL_DS; + regs->ss = FLAT_KERNEL_SS; + regs->cs = FLAT_KERNEL_CS; + regs->eip = start_pc; + regs->esp = start_stack; + regs->esi = start_info; + + __save_flags(regs->eflags); + regs->eflags |= X86_EFLAGS_IF; } @@ -560,60 +560,60 @@ static void load_segments(struct exec_domain *p, struct exec_domain *n) int all_segs_okay = 1; /* Either selector != 0 ==> reload. */ - if ( unlikely(p->arch.user_ctxt.ds | - n->arch.user_ctxt.ds) ) - all_segs_okay &= loadsegment(ds, n->arch.user_ctxt.ds); + if ( unlikely(p->arch.user_regs.ds | + n->arch.user_regs.ds) ) + all_segs_okay &= loadsegment(ds, n->arch.user_regs.ds); /* Either selector != 0 ==> reload. */ - if ( unlikely(p->arch.user_ctxt.es | - n->arch.user_ctxt.es) ) - all_segs_okay &= loadsegment(es, n->arch.user_ctxt.es); + if ( unlikely(p->arch.user_regs.es | + n->arch.user_regs.es) ) + all_segs_okay &= loadsegment(es, n->arch.user_regs.es); /* * Either selector != 0 ==> reload. * Also reload to reset FS_BASE if it was non-zero. 
*/ - if ( unlikely(p->arch.user_ctxt.fs | - p->arch.user_ctxt.fs_base | - n->arch.user_ctxt.fs) ) + if ( unlikely(p->arch.user_regs.fs | + p->arch.user_regs.fs_base | + n->arch.user_regs.fs) ) { - all_segs_okay &= loadsegment(fs, n->arch.user_ctxt.fs); - if ( p->arch.user_ctxt.fs ) /* != 0 selector kills fs_base */ - p->arch.user_ctxt.fs_base = 0; + all_segs_okay &= loadsegment(fs, n->arch.user_regs.fs); + if ( p->arch.user_regs.fs ) /* != 0 selector kills fs_base */ + p->arch.user_regs.fs_base = 0; } /* * Either selector != 0 ==> reload. * Also reload to reset GS_BASE if it was non-zero. */ - if ( unlikely(p->arch.user_ctxt.gs | - p->arch.user_ctxt.gs_base_user | - n->arch.user_ctxt.gs) ) + if ( unlikely(p->arch.user_regs.gs | + p->arch.user_regs.gs_base_user | + n->arch.user_regs.gs) ) { /* Reset GS_BASE with user %gs? */ - if ( p->arch.user_ctxt.gs || !n->arch.user_ctxt.gs_base_user ) - all_segs_okay &= loadsegment(gs, n->arch.user_ctxt.gs); - if ( p->arch.user_ctxt.gs ) /* != 0 selector kills gs_base_user */ - p->arch.user_ctxt.gs_base_user = 0; + if ( p->arch.user_regs.gs || !n->arch.user_regs.gs_base_user ) + all_segs_okay &= loadsegment(gs, n->arch.user_regs.gs); + if ( p->arch.user_regs.gs ) /* != 0 selector kills gs_base_user */ + p->arch.user_regs.gs_base_user = 0; } /* This can only be non-zero if selector is NULL. */ - if ( n->arch.user_ctxt.fs_base ) + if ( n->arch.user_regs.fs_base ) wrmsr(MSR_FS_BASE, - n->arch.user_ctxt.fs_base, - n->arch.user_ctxt.fs_base>>32); + n->arch.user_regs.fs_base, + n->arch.user_regs.fs_base>>32); /* Most kernels have non-zero GS base, so don't bother testing. */ /* (This is also a serialising instruction, avoiding AMD erratum #88.) */ wrmsr(MSR_SHADOW_GS_BASE, - n->arch.user_ctxt.gs_base_kernel, - n->arch.user_ctxt.gs_base_kernel>>32); + n->arch.user_regs.gs_base_kernel, + n->arch.user_regs.gs_base_kernel>>32); /* This can only be non-zero if selector is NULL. 
*/ - if ( n->arch.user_ctxt.gs_base_user ) + if ( n->arch.user_regs.gs_base_user ) wrmsr(MSR_GS_BASE, - n->arch.user_ctxt.gs_base_user, - n->arch.user_ctxt.gs_base_user>>32); + n->arch.user_regs.gs_base_user, + n->arch.user_regs.gs_base_user>>32); /* If in kernel mode then switch the GS bases around. */ if ( n->arch.flags & TF_kernel_mode ) @@ -621,7 +621,7 @@ static void load_segments(struct exec_domain *p, struct exec_domain *n) if ( unlikely(!all_segs_okay) ) { - struct xen_regs *regs = get_execution_context(); + struct cpu_user_regs *regs = get_cpu_user_regs(); unsigned long *rsp = (n->arch.flags & TF_kernel_mode) ? (unsigned long *)regs->rsp : @@ -637,10 +637,10 @@ static void load_segments(struct exec_domain *p, struct exec_domain *n) put_user(regs->rflags, rsp- 3) | put_user(regs->cs, rsp- 4) | put_user(regs->rip, rsp- 5) | - put_user(n->arch.user_ctxt.gs, rsp- 6) | - put_user(n->arch.user_ctxt.fs, rsp- 7) | - put_user(n->arch.user_ctxt.es, rsp- 8) | - put_user(n->arch.user_ctxt.ds, rsp- 9) | + put_user(n->arch.user_regs.gs, rsp- 6) | + put_user(n->arch.user_regs.fs, rsp- 7) | + put_user(n->arch.user_regs.es, rsp- 8) | + put_user(n->arch.user_regs.ds, rsp- 9) | put_user(regs->r11, rsp-10) | put_user(regs->rcx, rsp-11) ) { @@ -659,10 +659,10 @@ static void load_segments(struct exec_domain *p, struct exec_domain *n) static void save_segments(struct exec_domain *p) { - __asm__ __volatile__ ( "movl %%ds,%0" : "=m" (p->arch.user_ctxt.ds) ); - __asm__ __volatile__ ( "movl %%es,%0" : "=m" (p->arch.user_ctxt.es) ); - __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (p->arch.user_ctxt.fs) ); - __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (p->arch.user_ctxt.gs) ); + __asm__ __volatile__ ( "movl %%ds,%0" : "=m" (p->arch.user_regs.ds) ); + __asm__ __volatile__ ( "movl %%es,%0" : "=m" (p->arch.user_regs.es) ); + __asm__ __volatile__ ( "movl %%fs,%0" : "=m" (p->arch.user_regs.fs) ); + __asm__ __volatile__ ( "movl %%gs,%0" : "=m" (p->arch.user_regs.gs) ); } static void 
clear_segments(void) @@ -679,7 +679,7 @@ static void clear_segments(void) long do_switch_to_user(void) { - struct xen_regs *regs = get_execution_context(); + struct cpu_user_regs *regs = get_cpu_user_regs(); struct switch_to_user stu; struct exec_domain *ed = current; @@ -728,14 +728,14 @@ static inline void switch_kernel_stack(struct exec_domain *n, unsigned int cpu) static void __context_switch(void) { - execution_context_t *stack_ec = get_execution_context(); + struct cpu_user_regs *stack_ec = get_cpu_user_regs(); unsigned int cpu = smp_processor_id(); struct exec_domain *p = percpu_ctxt[cpu].curr_ed; struct exec_domain *n = current; if ( !is_idle_task(p->domain) ) { - memcpy(&p->arch.user_ctxt, + memcpy(&p->arch.user_regs, stack_ec, CTXT_SWITCH_STACK_BYTES); unlazy_fpu(p); @@ -746,7 +746,7 @@ static void __context_switch(void) if ( !is_idle_task(n->domain) ) { memcpy(stack_ec, - &n->arch.user_ctxt, + &n->arch.user_regs, CTXT_SWITCH_STACK_BYTES); /* Maybe switch the debug registers. */ @@ -844,7 +844,7 @@ unsigned long __hypercall_create_continuation( unsigned int op, unsigned int nr_args, ...) 
{ struct mc_state *mcs = &mc_state[smp_processor_id()]; - execution_context_t *ec; + struct cpu_user_regs *regs; unsigned int i; va_list args; @@ -859,37 +859,37 @@ unsigned long __hypercall_create_continuation( } else { - ec = get_execution_context(); + regs = get_cpu_user_regs(); #if defined(__i386__) - ec->eax = op; - ec->eip -= 2; /* re-execute 'int 0x82' */ + regs->eax = op; + regs->eip -= 2; /* re-execute 'int 0x82' */ for ( i = 0; i < nr_args; i++ ) { switch ( i ) { - case 0: ec->ebx = va_arg(args, unsigned long); break; - case 1: ec->ecx = va_arg(args, unsigned long); break; - case 2: ec->edx = va_arg(args, unsigned long); break; - case 3: ec->esi = va_arg(args, unsigned long); break; - case 4: ec->edi = va_arg(args, unsigned long); break; - case 5: ec->ebp = va_arg(args, unsigned long); break; + case 0: regs->ebx = va_arg(args, unsigned long); break; + case 1: regs->ecx = va_arg(args, unsigned long); break; + case 2: regs->edx = va_arg(args, unsigned long); break; + case 3: regs->esi = va_arg(args, unsigned long); break; + case 4: regs->edi = va_arg(args, unsigned long); break; + case 5: regs->ebp = va_arg(args, unsigned long); break; } } #elif defined(__x86_64__) - ec->rax = op; - ec->rip -= 2; /* re-execute 'syscall' */ + regs->rax = op; + regs->rip -= 2; /* re-execute 'syscall' */ for ( i = 0; i < nr_args; i++ ) { switch ( i ) { - case 0: ec->rdi = va_arg(args, unsigned long); break; - case 1: ec->rsi = va_arg(args, unsigned long); break; - case 2: ec->rdx = va_arg(args, unsigned long); break; - case 3: ec->r10 = va_arg(args, unsigned long); break; - case 4: ec->r8 = va_arg(args, unsigned long); break; - case 5: ec->r9 = va_arg(args, unsigned long); break; + case 0: regs->rdi = va_arg(args, unsigned long); break; + case 1: regs->rsi = va_arg(args, unsigned long); break; + case 2: regs->rdx = va_arg(args, unsigned long); break; + case 3: regs->r10 = va_arg(args, unsigned long); break; + case 4: regs->r8 = va_arg(args, unsigned long); break; + case 5: 
regs->r9 = va_arg(args, unsigned long); break; } } #endif diff --git a/xen/arch/x86/extable.c b/xen/arch/x86/extable.c index c986b74d7c..13a46f69b3 100644 --- a/xen/arch/x86/extable.c +++ b/xen/arch/x86/extable.c @@ -68,7 +68,7 @@ search_exception_table(unsigned long addr) } unsigned long -search_pre_exception_table(struct xen_regs *regs) +search_pre_exception_table(struct cpu_user_regs *regs) { unsigned long addr = (unsigned long)regs->eip; unsigned long fixup = search_one_table( diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c index 210c52b8af..86b3d7da99 100644 --- a/xen/arch/x86/irq.c +++ b/xen/arch/x86/irq.c @@ -17,7 +17,7 @@ irq_desc_t irq_desc[NR_IRQS]; static void __do_IRQ_guest(int irq); -void no_action(int cpl, void *dev_id, struct xen_regs *regs) { } +void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs) { } static void enable_none(unsigned int irq) { } static unsigned int startup_none(unsigned int irq) { return 0; } @@ -87,7 +87,7 @@ void enable_irq(unsigned int irq) spin_unlock_irqrestore(&desc->lock, flags); } -asmlinkage void do_IRQ(struct xen_regs *regs) +asmlinkage void do_IRQ(struct cpu_user_regs *regs) { unsigned int irq = regs->entry_vector; irq_desc_t *desc = &irq_desc[irq]; diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c index 050a4c0d33..0c7e9da529 100644 --- a/xen/arch/x86/mm.c +++ b/xen/arch/x86/mm.c @@ -2842,7 +2842,7 @@ int ptwr_do_page_fault(struct domain *d, unsigned long addr) return EXCRET_fault_fixed; emulate: - if ( x86_emulate_memop(get_execution_context(), addr, + if ( x86_emulate_memop(get_cpu_user_regs(), addr, &ptwr_mem_emulator, BITS_PER_LONG/8) ) return 0; perfc_incrc(ptwr_emulations); diff --git a/xen/arch/x86/nmi.c b/xen/arch/x86/nmi.c index 89770b774a..ee798f9a36 100644 --- a/xen/arch/x86/nmi.c +++ b/xen/arch/x86/nmi.c @@ -267,7 +267,7 @@ void touch_nmi_watchdog (void) alert_counter[i] = 0; } -void nmi_watchdog_tick (struct xen_regs * regs) +void nmi_watchdog_tick (struct cpu_user_regs * regs) { int sum, 
cpu = smp_processor_id(); diff --git a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c index 4413d48da0..3651d9c9aa 100644 --- a/xen/arch/x86/shadow.c +++ b/xen/arch/x86/shadow.c @@ -2421,7 +2421,7 @@ void __shadow_sync_all(struct domain *d) free_out_of_sync_state(d); } -int shadow_fault(unsigned long va, struct xen_regs *regs) +int shadow_fault(unsigned long va, struct cpu_user_regs *regs) { l1_pgentry_t gpte, spte, orig_gpte; struct exec_domain *ed = current; diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c index a038afbedb..d9a6a5999f 100644 --- a/xen/arch/x86/time.c +++ b/xen/arch/x86/time.c @@ -51,7 +51,7 @@ static s_time_t stime_irq; /* System time at last 'time update' */ static unsigned long wc_sec, wc_usec; /* UTC time at last 'time update'. */ static rwlock_t time_lock = RW_LOCK_UNLOCKED; -void timer_interrupt(int irq, void *dev_id, struct xen_regs *regs) +void timer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs) { write_lock_irq(&time_lock); diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c index 948dd1802b..9d69d69492 100644 --- a/xen/arch/x86/traps.c +++ b/xen/arch/x86/traps.c @@ -95,7 +95,7 @@ asmlinkage void machine_check(void); * are disabled). In such situations we can't do much that is safe. We try to * print out some tracing and then we just spin. 
*/ -asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs) +asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs) { int cpu = smp_processor_id(); unsigned long cr2; @@ -136,7 +136,7 @@ asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs) } static inline int do_trap(int trapnr, char *str, - struct xen_regs *regs, + struct cpu_user_regs *regs, int use_error_code) { struct exec_domain *ed = current; @@ -186,13 +186,13 @@ static inline int do_trap(int trapnr, char *str, } #define DO_ERROR_NOCODE(trapnr, str, name) \ -asmlinkage int do_##name(struct xen_regs *regs) \ +asmlinkage int do_##name(struct cpu_user_regs *regs) \ { \ return do_trap(trapnr, str, regs, 0); \ } #define DO_ERROR(trapnr, str, name) \ -asmlinkage int do_##name(struct xen_regs *regs) \ +asmlinkage int do_##name(struct cpu_user_regs *regs) \ { \ return do_trap(trapnr, str, regs, 1); \ } @@ -209,7 +209,7 @@ DO_ERROR_NOCODE(16, "fpu error", coprocessor_error) DO_ERROR(17, "alignment check", alignment_check) DO_ERROR_NOCODE(19, "simd error", simd_coprocessor_error) -asmlinkage int do_int3(struct xen_regs *regs) +asmlinkage int do_int3(struct cpu_user_regs *regs) { struct exec_domain *ed = current; struct trap_bounce *tb = &ed->arch.trap_bounce; @@ -234,7 +234,7 @@ asmlinkage int do_int3(struct xen_regs *regs) return 0; } -asmlinkage void do_machine_check(struct xen_regs *regs) +asmlinkage void do_machine_check(struct cpu_user_regs *regs) { fatal_trap(TRAP_machine_check, regs); } @@ -257,7 +257,7 @@ void propagate_page_fault(unsigned long addr, u16 error_code) ed->arch.guest_cr2 = addr; } -asmlinkage int do_page_fault(struct xen_regs *regs) +asmlinkage int do_page_fault(struct cpu_user_regs *regs) { unsigned long off, addr, fixup; struct exec_domain *ed = current; @@ -374,7 +374,7 @@ long do_fpu_taskswitch(int set) /* Has the guest requested sufficient permission for this I/O access? 
*/ static inline int guest_io_okay( unsigned int port, unsigned int bytes, - struct exec_domain *ed, struct xen_regs *regs) + struct exec_domain *ed, struct cpu_user_regs *regs) { u16 x; #if defined(__x86_64__) @@ -404,7 +404,7 @@ static inline int guest_io_okay( /* Has the administrator granted sufficient permission for this I/O access? */ static inline int admin_io_okay( unsigned int port, unsigned int bytes, - struct exec_domain *ed, struct xen_regs *regs) + struct exec_domain *ed, struct cpu_user_regs *regs) { struct domain *d = ed->domain; u16 x; @@ -436,7 +436,7 @@ static inline int admin_io_okay( goto read_fault; \ eip += _size; (_type)_x; }) -static int emulate_privileged_op(struct xen_regs *regs) +static int emulate_privileged_op(struct cpu_user_regs *regs) { struct exec_domain *ed = current; unsigned long *reg, eip = regs->eip; @@ -743,7 +743,7 @@ static int emulate_privileged_op(struct xen_regs *regs) return EXCRET_fault_fixed; } -asmlinkage int do_general_protection(struct xen_regs *regs) +asmlinkage int do_general_protection(struct cpu_user_regs *regs) { struct exec_domain *ed = current; struct trap_bounce *tb = &ed->arch.trap_bounce; @@ -851,7 +851,7 @@ static void nmi_softirq(void) send_guest_virq(dom0->exec_domain[0], VIRQ_IO_ERR); } -asmlinkage void mem_parity_error(struct xen_regs *regs) +asmlinkage void mem_parity_error(struct cpu_user_regs *regs) { /* Clear and disable the parity-error line. */ outb((inb(0x61)&15)|4,0x61); @@ -870,7 +870,7 @@ asmlinkage void mem_parity_error(struct xen_regs *regs) } } -asmlinkage void io_check_error(struct xen_regs *regs) +asmlinkage void io_check_error(struct cpu_user_regs *regs) { /* Clear and disable the I/O-error line. 
*/ outb((inb(0x61)&15)|8,0x61); @@ -896,7 +896,7 @@ static void unknown_nmi_error(unsigned char reason) printk("Do you have a strange power saving mode enabled?\n"); } -asmlinkage void do_nmi(struct xen_regs *regs, unsigned long reason) +asmlinkage void do_nmi(struct cpu_user_regs *regs, unsigned long reason) { ++nmi_count(smp_processor_id()); @@ -911,7 +911,7 @@ asmlinkage void do_nmi(struct xen_regs *regs, unsigned long reason) unknown_nmi_error((unsigned char)(reason&0xff)); } -asmlinkage int math_state_restore(struct xen_regs *regs) +asmlinkage int math_state_restore(struct cpu_user_regs *regs) { /* Prevent recursion. */ clts(); @@ -936,7 +936,7 @@ asmlinkage int math_state_restore(struct xen_regs *regs) return EXCRET_fault_fixed; } -asmlinkage int do_debug(struct xen_regs *regs) +asmlinkage int do_debug(struct cpu_user_regs *regs) { unsigned long condition; struct exec_domain *ed = current; @@ -978,7 +978,7 @@ asmlinkage int do_debug(struct xen_regs *regs) return EXCRET_not_a_fault; } -asmlinkage int do_spurious_interrupt_bug(struct xen_regs *regs) +asmlinkage int do_spurious_interrupt_bug(struct cpu_user_regs *regs) { return EXCRET_not_a_fault; } diff --git a/xen/arch/x86/vmx.c b/xen/arch/x86/vmx.c index db82c73eac..b67612c3d9 100644 --- a/xen/arch/x86/vmx.c +++ b/xen/arch/x86/vmx.c @@ -46,7 +46,7 @@ unsigned int opt_vmx_debug_level = 0; extern long evtchn_send(int lport); extern long do_block(void); -void do_nmi(struct xen_regs *, unsigned long); +void do_nmi(struct cpu_user_regs *, unsigned long); int start_vmx() { @@ -105,7 +105,7 @@ static void inline __update_guest_eip(unsigned long inst_len) #include -static int vmx_do_page_fault(unsigned long va, struct xen_regs *regs) +static int vmx_do_page_fault(unsigned long va, struct cpu_user_regs *regs) { struct exec_domain *ed = current; unsigned long eip; @@ -154,7 +154,7 @@ static int vmx_do_page_fault(unsigned long va, struct xen_regs *regs) return result; } -static void 
vmx_do_general_protection_fault(struct xen_regs *regs) +static void vmx_do_general_protection_fault(struct cpu_user_regs *regs) { unsigned long eip, error_code; unsigned long intr_fields; @@ -181,7 +181,7 @@ static void vmx_do_general_protection_fault(struct xen_regs *regs) __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); } -static void vmx_vmexit_do_cpuid(unsigned long input, struct xen_regs *regs) +static void vmx_vmexit_do_cpuid(unsigned long input, struct cpu_user_regs *regs) { unsigned int eax, ebx, ecx, edx; unsigned long eip; @@ -217,7 +217,7 @@ static void vmx_vmexit_do_cpuid(unsigned long input, struct xen_regs *regs) #define CASE_GET_REG_P(REG, reg) \ case REG_ ## REG: reg_p = (unsigned long *)&(regs->reg); break -static void vmx_dr_access (unsigned long exit_qualification, struct xen_regs *regs) +static void vmx_dr_access (unsigned long exit_qualification, struct cpu_user_regs *regs) { unsigned int reg; unsigned long *reg_p = 0; @@ -288,7 +288,7 @@ static void vmx_vmexit_do_invlpg(unsigned long va) shadow_invlpg(ed, va); } -static void vmx_io_instruction(struct xen_regs *regs, +static void vmx_io_instruction(struct cpu_user_regs *regs, unsigned long exit_qualification, unsigned long inst_len) { struct exec_domain *d = current; @@ -728,7 +728,7 @@ static int vmx_set_cr0(unsigned long value) /* * Write to control registers */ -static int mov_to_cr(int gp, int cr, struct xen_regs *regs) +static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs) { unsigned long value; unsigned long old_cr; @@ -847,7 +847,7 @@ static int mov_to_cr(int gp, int cr, struct xen_regs *regs) /* * Read from control registers. CR0 and CR4 are read from the shadow. 
*/ -static void mov_from_cr(int cr, int gp, struct xen_regs *regs) +static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs) { unsigned long value; struct exec_domain *d = current; @@ -878,7 +878,7 @@ static void mov_from_cr(int cr, int gp, struct xen_regs *regs) VMX_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx,", cr, value); } -static int vmx_cr_access(unsigned long exit_qualification, struct xen_regs *regs) +static int vmx_cr_access(unsigned long exit_qualification, struct cpu_user_regs *regs) { unsigned int gp, cr; unsigned long value; @@ -916,7 +916,7 @@ static int vmx_cr_access(unsigned long exit_qualification, struct xen_regs *regs return 1; } -static inline void vmx_do_msr_read(struct xen_regs *regs) +static inline void vmx_do_msr_read(struct cpu_user_regs *regs) { VMX_DBG_LOG(DBG_LEVEL_1, "vmx_do_msr_read: ecx=%lx, eax=%lx, edx=%lx", (unsigned long)regs->ecx, (unsigned long)regs->eax, @@ -973,7 +973,7 @@ static void vmx_print_line(const char c, struct exec_domain *d) print_buf[index++] = c; } -void save_vmx_execution_context(execution_context_t *ctxt) +void save_vmx_cpu_user_regs(struct cpu_user_regs *ctxt) { __vmread(GUEST_SS_SELECTOR, &ctxt->ss); __vmread(GUEST_ESP, &ctxt->esp); @@ -988,7 +988,7 @@ void save_vmx_execution_context(execution_context_t *ctxt) } #ifdef XEN_DEBUGGER -void save_xen_regs(struct xen_regs *regs) +void save_cpu_user_regs(struct cpu_user_regs *regs) { __vmread(GUEST_SS_SELECTOR, ®s->xss); __vmread(GUEST_ESP, ®s->esp); @@ -1002,7 +1002,7 @@ void save_xen_regs(struct xen_regs *regs) __vmread(GUEST_DS_SELECTOR, ®s->xds); } -void restore_xen_regs(struct xen_regs *regs) +void restore_cpu_user_regs(struct cpu_user_regs *regs) { __vmwrite(GUEST_SS_SELECTOR, regs->xss); __vmwrite(GUEST_ESP, regs->esp); @@ -1017,7 +1017,7 @@ void restore_xen_regs(struct xen_regs *regs) } #endif -asmlinkage void vmx_vmexit_handler(struct xen_regs regs) +asmlinkage void vmx_vmexit_handler(struct cpu_user_regs regs) { unsigned int 
exit_reason, idtv_info_field; unsigned long exit_qualification, eip, inst_len = 0; @@ -1080,16 +1080,16 @@ asmlinkage void vmx_vmexit_handler(struct xen_regs regs) #ifdef XEN_DEBUGGER case TRAP_debug: { - save_xen_regs(®s); + save_cpu_user_regs(®s); pdb_handle_exception(1, ®s, 1); - restore_xen_regs(®s); + restore_cpu_user_regs(®s); break; } case TRAP_int3: { - save_xen_regs(®s); + save_cpu_user_regs(®s); pdb_handle_exception(3, ®s, 1); - restore_xen_regs(®s); + restore_cpu_user_regs(®s); break; } #endif @@ -1139,9 +1139,9 @@ asmlinkage void vmx_vmexit_handler(struct xen_regs regs) case EXIT_REASON_EXTERNAL_INTERRUPT: { extern int vector_irq[]; - extern asmlinkage void do_IRQ(struct xen_regs *); - extern void smp_apic_timer_interrupt(struct xen_regs *); - extern void timer_interrupt(int, void *, struct xen_regs *); + extern asmlinkage void do_IRQ(struct cpu_user_regs *); + extern void smp_apic_timer_interrupt(struct cpu_user_regs *); + extern void timer_interrupt(int, void *, struct cpu_user_regs *); unsigned int vector; if ((error = __vmread(VM_EXIT_INTR_INFO, &vector)) diff --git a/xen/arch/x86/vmx_intercept.c b/xen/arch/x86/vmx_intercept.c index e7ef63bf9b..e23ac6b9f1 100644 --- a/xen/arch/x86/vmx_intercept.c +++ b/xen/arch/x86/vmx_intercept.c @@ -140,19 +140,19 @@ static int pit_read_io(struct vmx_virpit_t *vpit) /* vmx_io_assist light-weight version, specific to PIT DM */ static void resume_pit_io(ioreq_t *p) { - execution_context_t *ec = get_execution_context(); - unsigned long old_eax = ec->eax; + struct cpu_user_regs *regs = get_cpu_user_regs(); + unsigned long old_eax = regs->eax; p->state = STATE_INVALID; switch(p->size) { case 1: - ec->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff); + regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff); break; case 2: - ec->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff); + regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff); break; case 4: - ec->eax = (p->u.data & 0xffffffff); + regs->eax = (p->u.data & 
0xffffffff); break; default: BUG(); diff --git a/xen/arch/x86/vmx_io.c b/xen/arch/x86/vmx_io.c index fda53fc9af..05780a2157 100644 --- a/xen/arch/x86/vmx_io.c +++ b/xen/arch/x86/vmx_io.c @@ -38,7 +38,7 @@ extern long do_block(); #if defined (__i386__) -static void load_xen_regs(struct xen_regs *regs) +static void load_cpu_user_regs(struct cpu_user_regs *regs) { /* * Write the guest register value into VMCS @@ -50,7 +50,7 @@ static void load_xen_regs(struct xen_regs *regs) __vmwrite(GUEST_EIP, regs->eip); } -static void set_reg_value (int size, int index, int seg, struct xen_regs *regs, long value) +static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value) { switch (size) { case BYTE: @@ -170,12 +170,12 @@ static void set_reg_value (int size, int index, int seg, struct xen_regs *regs, } } #else -static void load_xen_regs(struct xen_regs *regs) +static void load_cpu_user_regs(struct cpu_user_regs *regs) { /* XXX: TBD */ return; } -static void set_reg_value (int size, int index, int seg, struct xen_regs *regs, long value) +static void set_reg_value (int size, int index, int seg, struct cpu_user_regs *regs, long value) { /* XXX: TBD */ return; @@ -187,11 +187,11 @@ void vmx_io_assist(struct exec_domain *ed) vcpu_iodata_t *vio; ioreq_t *p; struct domain *d = ed->domain; - execution_context_t *ec = get_execution_context(); + struct cpu_user_regs *regs = get_cpu_user_regs(); unsigned long old_eax; int sign; struct mi_per_cpu_info *mpci_p; - struct xen_regs *inst_decoder_regs; + struct cpu_user_regs *inst_decoder_regs; mpci_p = &ed->arch.arch_vmx.vmx_platform.mpci; inst_decoder_regs = mpci_p->inst_decoder_regs; @@ -230,8 +230,8 @@ void vmx_io_assist(struct exec_domain *ed) sign = (p->df) ? 
-1 : 1; if (p->port_mm) { if (p->pdata_valid) { - ec->esi += sign * p->count * p->size; - ec->edi += sign * p->count * p->size; + regs->esi += sign * p->count * p->size; + regs->edi += sign * p->count * p->size; } else { if (p->dir == IOREQ_WRITE) { return; @@ -244,38 +244,38 @@ void vmx_io_assist(struct exec_domain *ed) if (ed->arch.arch_vmx.vmx_platform.mpci.mmio_target & WZEROEXTEND) { p->u.data = p->u.data & 0xffff; } - set_reg_value(size, index, 0, (struct xen_regs *)ec, p->u.data); + set_reg_value(size, index, 0, regs, p->u.data); } - load_xen_regs((struct xen_regs *)ec); + load_cpu_user_regs(regs); return; } if (p->dir == IOREQ_WRITE) { if (p->pdata_valid) { - ec->esi += sign * p->count * p->size; - ec->ecx -= p->count; + regs->esi += sign * p->count * p->size; + regs->ecx -= p->count; } return; } else { if (p->pdata_valid) { - ec->edi += sign * p->count * p->size; - ec->ecx -= p->count; + regs->edi += sign * p->count * p->size; + regs->ecx -= p->count; return; } } - old_eax = ec->eax; + old_eax = regs->eax; switch(p->size) { case 1: - ec->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff); + regs->eax = (old_eax & 0xffffff00) | (p->u.data & 0xff); break; case 2: - ec->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff); + regs->eax = (old_eax & 0xffff0000) | (p->u.data & 0xffff); break; case 4: - ec->eax = (p->u.data & 0xffffffff); + regs->eax = (p->u.data & 0xffffffff); break; default: BUG(); diff --git a/xen/arch/x86/vmx_platform.c b/xen/arch/x86/vmx_platform.c index 5649597e97..8de185c072 100644 --- a/xen/arch/x86/vmx_platform.c +++ b/xen/arch/x86/vmx_platform.c @@ -39,17 +39,17 @@ #define DECODE_failure 0 #if defined (__x86_64__) -static void store_xen_regs(struct xen_regs *regs) +static void store_cpu_user_regs(struct cpu_user_regs *regs) { } -static long get_reg_value(int size, int index, int seg, struct xen_regs *regs) +static long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs) { return 0; } #elif defined (__i386__) -static 
void store_xen_regs(struct xen_regs *regs) +static void store_cpu_user_regs(struct cpu_user_regs *regs) { __vmread(GUEST_SS_SELECTOR, ®s->ss); __vmread(GUEST_ESP, ®s->esp); @@ -60,7 +60,7 @@ static void store_xen_regs(struct xen_regs *regs) __vmread(GUEST_EIP, ®s->eip); } -static long get_reg_value(int size, int index, int seg, struct xen_regs *regs) +static long get_reg_value(int size, int index, int seg, struct cpu_user_regs *regs) { /* * Reference the db_reg[] table @@ -468,7 +468,7 @@ static void send_mmio_req(unsigned long gpa, ioreq_t *p; int vm86; struct mi_per_cpu_info *mpci_p; - struct xen_regs *inst_decoder_regs; + struct cpu_user_regs *inst_decoder_regs; extern long evtchn_send(int lport); extern long do_block(void); @@ -528,7 +528,7 @@ void handle_mmio(unsigned long va, unsigned long gpa) unsigned long eip, eflags, cs; unsigned long inst_len, inst_addr; struct mi_per_cpu_info *mpci_p; - struct xen_regs *inst_decoder_regs; + struct cpu_user_regs *inst_decoder_regs; struct instruction mmio_inst; unsigned char inst[MAX_INST_LEN]; int vm86, ret; @@ -569,7 +569,7 @@ void handle_mmio(unsigned long va, unsigned long gpa) domain_crash_synchronous(); __vmwrite(GUEST_EIP, eip + inst_len); - store_xen_regs(inst_decoder_regs); + store_cpu_user_regs(inst_decoder_regs); // Only handle "mov" and "movs" instructions! 
if (!strncmp((char *)mmio_inst.i_name, "movz", 4)) { diff --git a/xen/arch/x86/vmx_vmcs.c b/xen/arch/x86/vmx_vmcs.c index a54db49cdf..90cc88122c 100644 --- a/xen/arch/x86/vmx_vmcs.c +++ b/xen/arch/x86/vmx_vmcs.c @@ -100,7 +100,7 @@ struct host_execution_env { #define round_pgdown(_p) ((_p)&PAGE_MASK) /* coped from domain.c */ -int vmx_setup_platform(struct exec_domain *d, execution_context_t *context) +int vmx_setup_platform(struct exec_domain *d, struct cpu_user_regs *regs) { int i; unsigned int n; @@ -108,15 +108,15 @@ int vmx_setup_platform(struct exec_domain *d, execution_context_t *context) struct e820entry *e820p; unsigned long gpfn = 0; - context->ebx = 0; /* Linux expects ebx to be 0 for boot proc */ + regs->ebx = 0; /* Linux expects ebx to be 0 for boot proc */ - n = context->ecx; + n = regs->ecx; if (n > 32) { VMX_DBG_LOG(DBG_LEVEL_1, "Too many e820 entries: %d", n); return -1; } - addr = context->edi; + addr = regs->edi; offset = (addr & ~PAGE_MASK); addr = round_pgdown(addr); mpfn = phys_to_machine_mapping(addr >> PAGE_SHIFT); @@ -162,14 +162,14 @@ void vmx_do_launch(struct exec_domain *ed) struct Xgt_desc_struct desc; unsigned long pfn = 0; struct pfn_info *page; - execution_context_t *ec = get_execution_context(); + struct cpu_user_regs *regs = get_cpu_user_regs(); cpu = smp_processor_id(); page = (struct pfn_info *) alloc_domheap_page(NULL); pfn = (unsigned long) (page - frame_table); - vmx_setup_platform(ed, ec); + vmx_setup_platform(ed, regs); __asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory"); host_env.gdtr_limit = desc.size; @@ -202,8 +202,8 @@ void vmx_do_launch(struct exec_domain *ed) * Initially set the same environement as host. 
*/ static inline int -construct_init_vmcs_guest(execution_context_t *context, - full_execution_context_t *full_context, +construct_init_vmcs_guest(struct cpu_user_regs *regs, + struct vcpu_guest_context *ctxt, struct host_execution_env *host_env) { int error = 0; @@ -232,12 +232,12 @@ construct_init_vmcs_guest(execution_context_t *context, error |= __vmwrite(CR3_TARGET_COUNT, 0); /* Guest Selectors */ - error |= __vmwrite(GUEST_CS_SELECTOR, context->cs); - error |= __vmwrite(GUEST_ES_SELECTOR, context->es); - error |= __vmwrite(GUEST_SS_SELECTOR, context->ss); - error |= __vmwrite(GUEST_DS_SELECTOR, context->ds); - error |= __vmwrite(GUEST_FS_SELECTOR, context->fs); - error |= __vmwrite(GUEST_GS_SELECTOR, context->gs); + error |= __vmwrite(GUEST_CS_SELECTOR, regs->cs); + error |= __vmwrite(GUEST_ES_SELECTOR, regs->es); + error |= __vmwrite(GUEST_SS_SELECTOR, regs->ss); + error |= __vmwrite(GUEST_DS_SELECTOR, regs->ds); + error |= __vmwrite(GUEST_FS_SELECTOR, regs->fs); + error |= __vmwrite(GUEST_GS_SELECTOR, regs->gs); /* Guest segment Limits */ error |= __vmwrite(GUEST_CS_LIMIT, GUEST_SEGMENT_LIMIT); @@ -268,10 +268,10 @@ construct_init_vmcs_guest(execution_context_t *context, arbytes.fields.seg_type = 0xb; /* type = 0xb */ error |= __vmwrite(GUEST_CS_AR_BYTES, arbytes.bytes); - error |= __vmwrite(GUEST_GDTR_BASE, context->edx); - context->edx = 0; - error |= __vmwrite(GUEST_GDTR_LIMIT, context->eax); - context->eax = 0; + error |= __vmwrite(GUEST_GDTR_BASE, regs->edx); + regs->edx = 0; + error |= __vmwrite(GUEST_GDTR_LIMIT, regs->eax); + regs->eax = 0; arbytes.fields.s = 0; /* not code or data segement */ arbytes.fields.seg_type = 0x2; /* LTD */ @@ -302,10 +302,10 @@ construct_init_vmcs_guest(execution_context_t *context, error |= __vmwrite(GUEST_GS_BASE, host_env->ds_base); error |= __vmwrite(GUEST_IDTR_BASE, host_env->idtr_base); - error |= __vmwrite(GUEST_ESP, context->esp); - error |= __vmwrite(GUEST_EIP, context->eip); + error |= __vmwrite(GUEST_ESP, 
regs->esp); + error |= __vmwrite(GUEST_EIP, regs->eip); - eflags = context->eflags & ~VMCS_EFLAGS_RESERVED_0; /* clear 0s */ + eflags = regs->eflags & ~VMCS_EFLAGS_RESERVED_0; /* clear 0s */ eflags |= VMCS_EFLAGS_RESERVED_1; /* set 1s */ error |= __vmwrite(GUEST_EFLAGS, eflags); @@ -380,8 +380,8 @@ static inline int construct_vmcs_host(struct host_execution_env *host_env) */ int construct_vmcs(struct arch_vmx_struct *arch_vmx, - execution_context_t *context, - full_execution_context_t *full_context, + struct cpu_user_regs *regs, + struct vcpu_guest_context *ctxt, int use_host_env) { int error; @@ -415,7 +415,7 @@ int construct_vmcs(struct arch_vmx_struct *arch_vmx, return -EINVAL; } /* guest selectors */ - if ((error = construct_init_vmcs_guest(context, full_context, &host_env))) { + if ((error = construct_init_vmcs_guest(regs, ctxt, &host_env))) { printk("construct_vmcs: construct_vmcs_guest failed\n"); return -EINVAL; } diff --git a/xen/arch/x86/x86_32/asm-offsets.c b/xen/arch/x86/x86_32/asm-offsets.c index 1a16ab2111..8e9ad32f6a 100644 --- a/xen/arch/x86/x86_32/asm-offsets.c +++ b/xen/arch/x86/x86_32/asm-offsets.c @@ -24,26 +24,26 @@ void __dummy__(void) { - OFFSET(XREGS_eax, struct xen_regs, eax); - OFFSET(XREGS_ebx, struct xen_regs, ebx); - OFFSET(XREGS_ecx, struct xen_regs, ecx); - OFFSET(XREGS_edx, struct xen_regs, edx); - OFFSET(XREGS_esi, struct xen_regs, esi); - OFFSET(XREGS_edi, struct xen_regs, edi); - OFFSET(XREGS_esp, struct xen_regs, esp); - OFFSET(XREGS_ebp, struct xen_regs, ebp); - OFFSET(XREGS_eip, struct xen_regs, eip); - OFFSET(XREGS_cs, struct xen_regs, cs); - OFFSET(XREGS_ds, struct xen_regs, ds); - OFFSET(XREGS_es, struct xen_regs, es); - OFFSET(XREGS_fs, struct xen_regs, fs); - OFFSET(XREGS_gs, struct xen_regs, gs); - OFFSET(XREGS_ss, struct xen_regs, ss); - OFFSET(XREGS_eflags, struct xen_regs, eflags); - OFFSET(XREGS_error_code, struct xen_regs, error_code); - OFFSET(XREGS_entry_vector, struct xen_regs, entry_vector); - 
OFFSET(XREGS_kernel_sizeof, struct xen_regs, esp); - DEFINE(XREGS_user_sizeof, sizeof(struct xen_regs)); + OFFSET(UREGS_eax, struct cpu_user_regs, eax); + OFFSET(UREGS_ebx, struct cpu_user_regs, ebx); + OFFSET(UREGS_ecx, struct cpu_user_regs, ecx); + OFFSET(UREGS_edx, struct cpu_user_regs, edx); + OFFSET(UREGS_esi, struct cpu_user_regs, esi); + OFFSET(UREGS_edi, struct cpu_user_regs, edi); + OFFSET(UREGS_esp, struct cpu_user_regs, esp); + OFFSET(UREGS_ebp, struct cpu_user_regs, ebp); + OFFSET(UREGS_eip, struct cpu_user_regs, eip); + OFFSET(UREGS_cs, struct cpu_user_regs, cs); + OFFSET(UREGS_ds, struct cpu_user_regs, ds); + OFFSET(UREGS_es, struct cpu_user_regs, es); + OFFSET(UREGS_fs, struct cpu_user_regs, fs); + OFFSET(UREGS_gs, struct cpu_user_regs, gs); + OFFSET(UREGS_ss, struct cpu_user_regs, ss); + OFFSET(UREGS_eflags, struct cpu_user_regs, eflags); + OFFSET(UREGS_error_code, struct cpu_user_regs, error_code); + OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector); + OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, esp); + DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs)); BLANK(); OFFSET(EDOMAIN_processor, struct exec_domain, processor); diff --git a/xen/arch/x86/x86_32/call_with_regs.S b/xen/arch/x86/x86_32/call_with_regs.S index 71b5b73eea..d3656d74e4 100644 --- a/xen/arch/x86/x86_32/call_with_regs.S +++ b/xen/arch/x86/x86_32/call_with_regs.S @@ -2,35 +2,35 @@ #include - // int call_with_registers(void (*f)(struct xen_regs *r)) -> - // build a xen_regs structure, and then call f with that. + // int call_with_registers(void (*f)(struct cpu_user_regs *r)) -> + // build a cpu_user_regs structure, and then call f with that. 
call_with_registers: pushf - subl $XREGS_user_sizeof, %esp - movl %ebx, XREGS_ebx(%esp) - movl %ecx, XREGS_ecx(%esp) - movl %edx, XREGS_edx(%esp) - movl %esi, XREGS_esi(%esp) - movl %edi, XREGS_edi(%esp) - movl %ebp, XREGS_ebp(%esp) - movl %eax, XREGS_eax(%esp) - movw $0, XREGS_error_code(%esp) - movw $0, XREGS_entry_vector(%esp) - movl XREGS_user_sizeof+4(%esp), %eax - movl %eax, XREGS_eip(%esp) - movl %cs, XREGS_cs(%esp) - movl XREGS_user_sizeof(%esp), %eax - movl %eax, XREGS_eflags(%esp) - movl %esp, XREGS_esp(%esp) - addl $XREGS_user_sizeof+4, XREGS_esp(%esp) - movl %ss, XREGS_ss(%esp) - movl %es, XREGS_es(%esp) - movl %ds, XREGS_ds(%esp) - movl %fs, XREGS_fs(%esp) - movl %gs, XREGS_gs(%esp) + subl $UREGS_user_sizeof, %esp + movl %ebx, UREGS_ebx(%esp) + movl %ecx, UREGS_ecx(%esp) + movl %edx, UREGS_edx(%esp) + movl %esi, UREGS_esi(%esp) + movl %edi, UREGS_edi(%esp) + movl %ebp, UREGS_ebp(%esp) + movl %eax, UREGS_eax(%esp) + movw $0, UREGS_error_code(%esp) + movw $0, UREGS_entry_vector(%esp) + movl UREGS_user_sizeof+4(%esp), %eax + movl %eax, UREGS_eip(%esp) + movl %cs, UREGS_cs(%esp) + movl UREGS_user_sizeof(%esp), %eax + movl %eax, UREGS_eflags(%esp) + movl %esp, UREGS_esp(%esp) + addl $UREGS_user_sizeof+4, UREGS_esp(%esp) + movl %ss, UREGS_ss(%esp) + movl %es, UREGS_es(%esp) + movl %ds, UREGS_ds(%esp) + movl %fs, UREGS_fs(%esp) + movl %gs, UREGS_gs(%esp) - movl XREGS_user_sizeof+8(%esp), %eax + movl UREGS_user_sizeof+8(%esp), %eax pushl %esp call *%eax - add $XREGS_user_sizeof + 8, %esp + add $UREGS_user_sizeof + 8, %esp ret diff --git a/xen/arch/x86/x86_32/entry.S b/xen/arch/x86/x86_32/entry.S index aa35590d27..60f154b270 100644 --- a/xen/arch/x86/x86_32/entry.S +++ b/xen/arch/x86/x86_32/entry.S @@ -76,7 +76,7 @@ * and we set it to the fixed value. * * We also need the room, especially because orig_eax field is used - * by do_IRQ(). Compared the xen_regs, we skip pushing for the following: + * by do_IRQ(). 
Compared the cpu_user_regs, we skip pushing for the following: * (10) u32 gs; * (9) u32 fs; * (8) u32 ds; @@ -99,7 +99,7 @@ pushl $VMX_MONITOR_EFLAGS; \ popf; \ subl $(NR_SKIPPED_REGS*4), %esp; \ - movl $0, 0xc(%esp); /* eflags==0 identifies xen_regs as VMX guest */ \ + movl $0, 0xc(%esp); /* eflags==0 identifies cpu_user_regs as VMX guest */ \ pushl %eax; \ pushl %ebp; \ pushl %edi; \ @@ -174,12 +174,12 @@ vmx_process_softirqs: ALIGN restore_all_guest: - testl $X86_EFLAGS_VM,XREGS_eflags(%esp) + testl $X86_EFLAGS_VM,UREGS_eflags(%esp) jnz restore_all_vm86 -FLT1: movl XREGS_ds(%esp),%ds -FLT2: movl XREGS_es(%esp),%es -FLT3: movl XREGS_fs(%esp),%fs -FLT4: movl XREGS_gs(%esp),%gs +FLT1: movl UREGS_ds(%esp),%ds +FLT2: movl UREGS_es(%esp),%es +FLT3: movl UREGS_fs(%esp),%fs +FLT4: movl UREGS_gs(%esp),%gs restore_all_vm86: popl %ebx popl %ecx @@ -193,13 +193,13 @@ FLT5: iret .section .fixup,"ax" FIX5: subl $28,%esp pushl 28(%esp) # error_code/entry_vector - movl %eax,XREGS_eax+4(%esp) - movl %ebp,XREGS_ebp+4(%esp) - movl %edi,XREGS_edi+4(%esp) - movl %esi,XREGS_esi+4(%esp) - movl %edx,XREGS_edx+4(%esp) - movl %ecx,XREGS_ecx+4(%esp) - movl %ebx,XREGS_ebx+4(%esp) + movl %eax,UREGS_eax+4(%esp) + movl %ebp,UREGS_ebp+4(%esp) + movl %edi,UREGS_edi+4(%esp) + movl %esi,UREGS_esi+4(%esp) + movl %edx,UREGS_edx+4(%esp) + movl %ecx,UREGS_ecx+4(%esp) + movl %ebx,UREGS_ebx+4(%esp) FIX1: SET_XEN_SEGMENTS(a) movl %eax,%fs movl %eax,%gs @@ -224,10 +224,10 @@ failsafe_callback: movw $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx) call create_bounce_frame xorl %eax,%eax - movl %eax,XREGS_ds(%esp) - movl %eax,XREGS_es(%esp) - movl %eax,XREGS_fs(%esp) - movl %eax,XREGS_gs(%esp) + movl %eax,UREGS_ds(%esp) + movl %eax,UREGS_es(%esp) + movl %eax,UREGS_fs(%esp) + movl %eax,UREGS_gs(%esp) jmp test_all_events .previous .section __pre_ex_table,"a" @@ -262,7 +262,7 @@ ENTRY(hypercall) andl $(NR_hypercalls-1),%eax PERFC_INCR(PERFC_hypercalls, %eax) call *SYMBOL_NAME(hypercall_table)(,%eax,4) - movl 
%eax,XREGS_eax(%esp) # save the return value + movl %eax,UREGS_eax(%esp) # save the return value test_all_events: xorl %ecx,%ecx @@ -301,41 +301,41 @@ process_softirqs: /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK: */ /* {EIP, CS, EFLAGS, [ESP, SS]} */ /* %edx == trap_bounce, %ebx == struct exec_domain */ -/* %eax,%ecx are clobbered. %gs:%esi contain new XREGS_ss/XREGS_esp. */ +/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp. */ create_bounce_frame: - movl XREGS_eflags+4(%esp),%ecx - movb XREGS_cs+4(%esp),%cl + movl UREGS_eflags+4(%esp),%ecx + movb UREGS_cs+4(%esp),%cl testl $(2|X86_EFLAGS_VM),%ecx jz ring1 /* jump if returning to an existing ring-1 activation */ movl EDOMAIN_kernel_sp(%ebx),%esi FLT6: movl EDOMAIN_kernel_ss(%ebx),%gs - testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp) + testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp) jz nvm86_1 subl $16,%esi /* push ES/DS/FS/GS (VM86 stack frame) */ - movl XREGS_es+4(%esp),%eax + movl UREGS_es+4(%esp),%eax FLT7: movl %eax,%gs:(%esi) - movl XREGS_ds+4(%esp),%eax + movl UREGS_ds+4(%esp),%eax FLT8: movl %eax,%gs:4(%esi) - movl XREGS_fs+4(%esp),%eax + movl UREGS_fs+4(%esp),%eax FLT9: movl %eax,%gs:8(%esi) - movl XREGS_gs+4(%esp),%eax + movl UREGS_gs+4(%esp),%eax FLT10: movl %eax,%gs:12(%esi) nvm86_1:subl $8,%esi /* push SS/ESP (inter-priv iret) */ - movl XREGS_esp+4(%esp),%eax + movl UREGS_esp+4(%esp),%eax FLT11: movl %eax,%gs:(%esi) - movl XREGS_ss+4(%esp),%eax + movl UREGS_ss+4(%esp),%eax FLT12: movl %eax,%gs:4(%esi) jmp 1f ring1: /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */ - movl XREGS_esp+4(%esp),%esi -FLT13: movl XREGS_ss+4(%esp),%gs + movl UREGS_esp+4(%esp),%esi +FLT13: movl UREGS_ss+4(%esp),%gs 1: /* Construct a stack frame: EFLAGS, CS/EIP */ subl $12,%esi - movl XREGS_eip+4(%esp),%eax + movl UREGS_eip+4(%esp),%eax FLT14: movl %eax,%gs:(%esi) - movl XREGS_cs+4(%esp),%eax + movl UREGS_cs+4(%esp),%eax FLT15: movl %eax,%gs:4(%esi) - movl XREGS_eflags+4(%esp),%eax + 
movl UREGS_eflags+4(%esp),%eax FLT16: movl %eax,%gs:8(%esi) movb TRAPBOUNCE_flags(%edx),%cl test $TBF_EXCEPTION_ERRCODE,%cl @@ -351,7 +351,7 @@ FLT18: movl %eax,%gs:(%esi) 1: testb $TBF_FAILSAFE,%cl jz 2f subl $16,%esi # add DS/ES/FS/GS to failsafe stack frame - testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp) + testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp) jz nvm86_2 xorl %eax,%eax # VM86: we write zero selector values FLT19: movl %eax,%gs:(%esi) @@ -359,30 +359,30 @@ FLT20: movl %eax,%gs:4(%esi) FLT21: movl %eax,%gs:8(%esi) FLT22: movl %eax,%gs:12(%esi) jmp 2f -nvm86_2:movl XREGS_ds+4(%esp),%eax # non-VM86: write real selector values +nvm86_2:movl UREGS_ds+4(%esp),%eax # non-VM86: write real selector values FLT23: movl %eax,%gs:(%esi) - movl XREGS_es+4(%esp),%eax + movl UREGS_es+4(%esp),%eax FLT24: movl %eax,%gs:4(%esi) - movl XREGS_fs+4(%esp),%eax + movl UREGS_fs+4(%esp),%eax FLT25: movl %eax,%gs:8(%esi) - movl XREGS_gs+4(%esp),%eax + movl UREGS_gs+4(%esp),%eax FLT26: movl %eax,%gs:12(%esi) -2: testl $X86_EFLAGS_VM,XREGS_eflags+4(%esp) +2: testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp) jz nvm86_3 xorl %eax,%eax /* zero DS-GS, just as a real CPU would */ - movl %eax,XREGS_ds+4(%esp) - movl %eax,XREGS_es+4(%esp) - movl %eax,XREGS_fs+4(%esp) - movl %eax,XREGS_gs+4(%esp) + movl %eax,UREGS_ds+4(%esp) + movl %eax,UREGS_es+4(%esp) + movl %eax,UREGS_fs+4(%esp) + movl %eax,UREGS_gs+4(%esp) nvm86_3:/* Rewrite our stack frame and return to ring 1. */ /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. 
*/ - andl $0xfffcbeff,XREGS_eflags+4(%esp) - movl %gs,XREGS_ss+4(%esp) - movl %esi,XREGS_esp+4(%esp) + andl $0xfffcbeff,UREGS_eflags+4(%esp) + movl %gs,UREGS_ss+4(%esp) + movl %esi,UREGS_esp+4(%esp) movzwl TRAPBOUNCE_cs(%edx),%eax - movl %eax,XREGS_cs+4(%esp) + movl %eax,UREGS_cs+4(%esp) movl TRAPBOUNCE_eip(%edx),%eax - movl %eax,XREGS_eip+4(%esp) + movl %eax,UREGS_eip+4(%esp) movb $0,TRAPBOUNCE_flags(%edx) ret .section __ex_table,"a" @@ -410,8 +410,8 @@ process_guest_exception_and_events: ALIGN ENTRY(ret_from_intr) GET_CURRENT(%ebx) - movl XREGS_eflags(%esp),%eax - movb XREGS_cs(%esp),%al + movl UREGS_eflags(%esp),%eax + movb UREGS_cs(%esp),%al testl $(3|X86_EFLAGS_VM),%eax jnz test_all_events jmp restore_all_xen @@ -422,26 +422,26 @@ ENTRY(divide_error) error_code: SAVE_ALL_NOSEGREGS(a) SET_XEN_SEGMENTS(a) - testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%esp) + testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp) jz exception_with_ints_disabled sti # re-enable interrupts xorl %eax,%eax - movw XREGS_entry_vector(%esp),%ax + movw UREGS_entry_vector(%esp),%ax movl %esp,%edx - pushl %edx # push the xen_regs pointer + pushl %edx # push the cpu_user_regs pointer GET_CURRENT(%ebx) PERFC_INCR(PERFC_exceptions, %eax) call *SYMBOL_NAME(exception_table)(,%eax,4) addl $4,%esp - movl XREGS_eflags(%esp),%eax - movb XREGS_cs(%esp),%al + movl UREGS_eflags(%esp),%eax + movb UREGS_cs(%esp),%al testl $(3|X86_EFLAGS_VM),%eax jz restore_all_xen jmp process_guest_exception_and_events exception_with_ints_disabled: - movl XREGS_eflags(%esp),%eax - movb XREGS_cs(%esp),%al + movl UREGS_eflags(%esp),%eax + movb UREGS_cs(%esp),%al testl $(3|X86_EFLAGS_VM),%eax # interrupts disabled outside Xen? jnz FATAL_exception_with_ints_disabled pushl %esp @@ -449,21 +449,21 @@ exception_with_ints_disabled: addl $4,%esp testl %eax,%eax # no fixup code for faulting EIP? 
jz FATAL_exception_with_ints_disabled - movl %eax,XREGS_eip(%esp) + movl %eax,UREGS_eip(%esp) movl %esp,%esi subl $4,%esp movl %esp,%edi - movl $XREGS_kernel_sizeof/4,%ecx + movl $UREGS_kernel_sizeof/4,%ecx rep; movsl # make room for error_code/entry_vector - movl XREGS_error_code(%esp),%eax # error_code/entry_vector - movl %eax,XREGS_kernel_sizeof(%esp) + movl UREGS_error_code(%esp),%eax # error_code/entry_vector + movl %eax,UREGS_kernel_sizeof(%esp) jmp restore_all_xen # return to fixup code FATAL_exception_with_ints_disabled: xorl %esi,%esi - movw XREGS_entry_vector(%esp),%si + movw UREGS_entry_vector(%esp),%si movl %esp,%edx - pushl %edx # push the xen_regs pointer + pushl %edx # push the cpu_user_regs pointer pushl %esi # push the trapnr (entry vector) call SYMBOL_NAME(fatal_trap) ud2 @@ -557,8 +557,8 @@ ENTRY(nmi) # In all other cases we bail without touching DS-GS, as we have # interrupted an enclosing Xen activation in tricky prologue or # epilogue code. - movl XREGS_eflags(%esp),%eax - movb XREGS_cs(%esp),%al + movl UREGS_eflags(%esp),%eax + movb UREGS_cs(%esp),%al testl $(3|X86_EFLAGS_VM),%eax jnz do_watchdog_tick movl %ds,%eax @@ -608,8 +608,8 @@ nmi_parity_err: push %edx call SYMBOL_NAME(mem_parity_error) addl $4,%esp -nmi_out:movl %ss:XREGS_eflags(%esp),%eax - movb %ss:XREGS_cs(%esp),%al +nmi_out:movl %ss:UREGS_eflags(%esp),%eax + movb %ss:UREGS_cs(%esp),%al testl $(3|X86_EFLAGS_VM),%eax jz restore_all_xen movl $(__HYPERVISOR_DS),%edx @@ -657,27 +657,27 @@ do_switch_vm86: addl $4,%esp # GS:ESI == Ring-1 stack activation - movl XREGS_esp(%esp),%esi -VFLT1: movl XREGS_ss(%esp),%gs + movl UREGS_esp(%esp),%esi +VFLT1: movl UREGS_ss(%esp),%gs # ES:EDI == Ring-0 stack activation - leal XREGS_eip(%esp),%edi + leal UREGS_eip(%esp),%edi # Restore the hypercall-number-clobbered EAX on our stack frame VFLT2: movl %gs:(%esi),%eax - movl %eax,XREGS_eax(%esp) + movl %eax,UREGS_eax(%esp) addl $4,%esi # Copy the VM86 activation from the ring-1 stack to the ring-0 
stack - movl $(XREGS_user_sizeof-XREGS_eip)/4,%ecx + movl $(UREGS_user_sizeof-UREGS_eip)/4,%ecx VFLT3: movl %gs:(%esi),%eax stosl addl $4,%esi loop VFLT3 # Fix up EFLAGS: IOPL=0, IF=1, VM=1 - andl $~X86_EFLAGS_IOPL,XREGS_eflags(%esp) - orl $X86_EFLAGS_IF|X86_EFLAGS_VM,XREGS_eflags(%esp) + andl $~X86_EFLAGS_IOPL,UREGS_eflags(%esp) + orl $X86_EFLAGS_IF|X86_EFLAGS_VM,UREGS_eflags(%esp) jmp test_all_events diff --git a/xen/arch/x86/x86_32/seg_fixup.c b/xen/arch/x86/x86_32/seg_fixup.c index b9ed3f7ece..4f448b7ae1 100644 --- a/xen/arch/x86/x86_32/seg_fixup.c +++ b/xen/arch/x86/x86_32/seg_fixup.c @@ -263,7 +263,7 @@ int fixup_seg(u16 seg, unsigned long offset) * Called from the general-protection fault handler to attempt to decode * and emulate an instruction that depends on 4GB segments. */ -int gpf_emulate_4gb(struct xen_regs *regs) +int gpf_emulate_4gb(struct cpu_user_regs *regs) { struct exec_domain *d = current; trap_info_t *ti; diff --git a/xen/arch/x86/x86_32/traps.c b/xen/arch/x86/x86_32/traps.c index 70bccd4017..a5be49896d 100644 --- a/xen/arch/x86/x86_32/traps.c +++ b/xen/arch/x86/x86_32/traps.c @@ -29,9 +29,10 @@ static inline int kernel_text_address(unsigned long addr) void show_guest_stack(void) { int i; - execution_context_t *ec = get_execution_context(); - unsigned long *stack = (unsigned long *)ec->esp; - printk("Guest EIP is %08x\n ", ec->eip); + struct cpu_user_regs *regs = get_cpu_user_regs(); + unsigned long *stack = (unsigned long *)regs->esp; + + printk("Guest EIP is %08x\n ", regs->eip); for ( i = 0; i < kstack_depth_to_print; i++ ) { @@ -89,7 +90,7 @@ void show_stack(unsigned long *esp) show_trace( esp ); } -void show_registers(struct xen_regs *regs) +void show_registers(struct cpu_user_regs *regs) { unsigned long ss, ds, es, fs, gs, cs; unsigned long eip, esp, eflags; @@ -215,9 +216,9 @@ asmlinkage void do_double_fault(void) } BUILD_SMP_INTERRUPT(deferred_nmi, TRAP_deferred_nmi) -asmlinkage void smp_deferred_nmi(struct xen_regs regs) +asmlinkage 
void smp_deferred_nmi(struct cpu_user_regs regs) { - asmlinkage void do_nmi(struct cpu_user_regs *, unsigned long); ack_APIC_irq(); do_nmi(&regs, 0); } diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c index 787c5ffea4..3f06bc79c4 100644 --- a/xen/arch/x86/x86_64/asm-offsets.c +++ b/xen/arch/x86/x86_64/asm-offsets.c @@ -24,30 +24,30 @@ void __dummy__(void) { - OFFSET(XREGS_r15, struct xen_regs, r15); - OFFSET(XREGS_r14, struct xen_regs, r14); - OFFSET(XREGS_r13, struct xen_regs, r13); - OFFSET(XREGS_r12, struct xen_regs, r12); - OFFSET(XREGS_rbp, struct xen_regs, rbp); - OFFSET(XREGS_rbx, struct xen_regs, rbx); - OFFSET(XREGS_r11, struct xen_regs, r11); - OFFSET(XREGS_r10, struct xen_regs, r10); - OFFSET(XREGS_r9, struct xen_regs, r9); - OFFSET(XREGS_r8, struct xen_regs, r8); - OFFSET(XREGS_rax, struct xen_regs, rax); - OFFSET(XREGS_rcx, struct xen_regs, rcx); - OFFSET(XREGS_rdx, struct xen_regs, rdx); - OFFSET(XREGS_rsi, struct xen_regs, rsi); - OFFSET(XREGS_rdi, struct xen_regs, rdi); - OFFSET(XREGS_error_code, struct xen_regs, error_code); - OFFSET(XREGS_entry_vector, struct xen_regs, entry_vector); - OFFSET(XREGS_rip, struct xen_regs, rip); - OFFSET(XREGS_cs, struct xen_regs, cs); - OFFSET(XREGS_eflags, struct xen_regs, eflags); - OFFSET(XREGS_rsp, struct xen_regs, rsp); - OFFSET(XREGS_ss, struct xen_regs, ss); - OFFSET(XREGS_kernel_sizeof, struct xen_regs, es); - DEFINE(XREGS_user_sizeof, sizeof(struct xen_regs)); + OFFSET(UREGS_r15, struct cpu_user_regs, r15); + OFFSET(UREGS_r14, struct cpu_user_regs, r14); + OFFSET(UREGS_r13, struct cpu_user_regs, r13); + OFFSET(UREGS_r12, struct cpu_user_regs, r12); + OFFSET(UREGS_rbp, struct cpu_user_regs, rbp); + OFFSET(UREGS_rbx, struct cpu_user_regs, rbx); + OFFSET(UREGS_r11, struct cpu_user_regs, r11); + OFFSET(UREGS_r10, struct cpu_user_regs, r10); + OFFSET(UREGS_r9, struct cpu_user_regs, r9); + OFFSET(UREGS_r8, struct 
cpu_user_regs, r8); + OFFSET(UREGS_rax, struct cpu_user_regs, rax); + OFFSET(UREGS_rcx, struct cpu_user_regs, rcx); + OFFSET(UREGS_rdx, struct cpu_user_regs, rdx); + OFFSET(UREGS_rsi, struct cpu_user_regs, rsi); + OFFSET(UREGS_rdi, struct cpu_user_regs, rdi); + OFFSET(UREGS_error_code, struct cpu_user_regs, error_code); + OFFSET(UREGS_entry_vector, struct cpu_user_regs, entry_vector); + OFFSET(UREGS_rip, struct cpu_user_regs, rip); + OFFSET(UREGS_cs, struct cpu_user_regs, cs); + OFFSET(UREGS_eflags, struct cpu_user_regs, eflags); + OFFSET(UREGS_rsp, struct cpu_user_regs, rsp); + OFFSET(UREGS_ss, struct cpu_user_regs, ss); + OFFSET(UREGS_kernel_sizeof, struct cpu_user_regs, es); + DEFINE(UREGS_user_sizeof, sizeof(struct cpu_user_regs)); BLANK(); OFFSET(EDOMAIN_processor, struct exec_domain, processor); diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S index e6ee0e8fd3..518b1c5301 100644 --- a/xen/arch/x86/x86_64/entry.S +++ b/xen/arch/x86/x86_64/entry.S @@ -123,7 +123,7 @@ ENTRY(syscall_enter) leaq SYMBOL_NAME(hypercall_table)(%rip),%r10 PERFC_INCR(PERFC_hypercalls, %rax) callq *(%r10,%rax,8) - movq %rax,XREGS_rax(%rsp) # save the return value + movq %rax,UREGS_rax(%rsp) # save the return value /* %rbx: struct exec_domain */ test_all_events: @@ -160,7 +160,7 @@ test_all_events: * and we set it to the fixed value. * * We also need the room, especially because orig_eax field is used - * by do_IRQ(). Compared the xen_regs, we skip pushing for the following: + * by do_IRQ(). Compared the cpu_user_regs, we skip pushing for the following: * (13) u64 gs_base_user; * (12) u64 gs_base_kernel; * (11) u64 fs_base; @@ -303,8 +303,8 @@ create_bounce_frame: movq EDOMAIN_kernel_sp(%rbx),%rsi jmp 2f 1: /* In kernel context already: push new frame at existing %rsp. */ - movq XREGS_rsp+8(%rsp),%rsi - andb $0xfc,XREGS_cs+8(%rsp) # Indicate kernel context to guest. + movq UREGS_rsp+8(%rsp),%rsi + andb $0xfc,UREGS_cs+8(%rsp) # Indicate kernel context to guest. 
2: movq $HYPERVISOR_VIRT_START,%rax cmpq %rax,%rsi jb 1f # In +ve address space? Then okay. @@ -312,15 +312,15 @@ create_bounce_frame: cmpq %rax,%rsi jb domain_crash_synchronous # Above Xen private area? Then okay. 1: subq $40,%rsi - movq XREGS_ss+8(%rsp),%rax + movq UREGS_ss+8(%rsp),%rax FLT2: movq %rax,32(%rsi) # SS - movq XREGS_rsp+8(%rsp),%rax + movq UREGS_rsp+8(%rsp),%rax FLT3: movq %rax,24(%rsi) # RSP - movq XREGS_eflags+8(%rsp),%rax + movq UREGS_eflags+8(%rsp),%rax FLT4: movq %rax,16(%rsi) # RFLAGS - movq XREGS_cs+8(%rsp),%rax + movq UREGS_cs+8(%rsp),%rax FLT5: movq %rax,8(%rsi) # CS - movq XREGS_rip+8(%rsp),%rax + movq UREGS_rip+8(%rsp),%rax FLT6: movq %rax,(%rsi) # RIP movb TRAPBOUNCE_flags(%rdx),%cl testb $TBF_EXCEPTION_ERRCODE,%cl @@ -345,19 +345,19 @@ FLT11: movq %rax,8(%rsi) # ES movl %ds,%eax FLT12: movq %rax,(%rsi) # DS 2: subq $16,%rsi - movq XREGS_r11+8(%rsp),%rax + movq UREGS_r11+8(%rsp),%rax FLT13: movq %rax,8(%rsi) # R11 - movq XREGS_rcx+8(%rsp),%rax + movq UREGS_rcx+8(%rsp),%rax FLT14: movq %rax,(%rsi) # RCX /* Rewrite our stack frame and return to guest-OS mode. */ /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */ - movq $TRAP_syscall,XREGS_entry_vector+8(%rsp) - andl $0xfffcbeff,XREGS_eflags+8(%rsp) - movq $__GUEST_SS,XREGS_ss+8(%rsp) - movq %rsi,XREGS_rsp+8(%rsp) - movq $__GUEST_CS,XREGS_cs+8(%rsp) + movq $TRAP_syscall,UREGS_entry_vector+8(%rsp) + andl $0xfffcbeff,UREGS_eflags+8(%rsp) + movq $__GUEST_SS,UREGS_ss+8(%rsp) + movq %rsi,UREGS_rsp+8(%rsp) + movq $__GUEST_CS,UREGS_cs+8(%rsp) movq TRAPBOUNCE_eip(%rdx),%rax - movq %rax,XREGS_rip+8(%rsp) + movq %rax,UREGS_rip+8(%rsp) movb $0,TRAPBOUNCE_flags(%rdx) ret .section __ex_table,"a" @@ -383,7 +383,7 @@ process_guest_exception_and_events: /* No special register assumptions. 
*/ ENTRY(ret_from_intr) GET_CURRENT(%rbx) - testb $3,XREGS_cs(%rsp) + testb $3,UREGS_cs(%rsp) jnz test_all_events jmp restore_all_xen @@ -391,43 +391,43 @@ ENTRY(ret_from_intr) /* No special register assumptions. */ error_code: SAVE_ALL - testb $X86_EFLAGS_IF>>8,XREGS_eflags+1(%rsp) + testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp) jz exception_with_ints_disabled sti movq %rsp,%rdi - movl XREGS_entry_vector(%rsp),%eax + movl UREGS_entry_vector(%rsp),%eax leaq SYMBOL_NAME(exception_table)(%rip),%rdx GET_CURRENT(%rbx) PERFC_INCR(PERFC_exceptions, %rax) callq *(%rdx,%rax,8) - testb $3,XREGS_cs(%rsp) + testb $3,UREGS_cs(%rsp) jz restore_all_xen jmp process_guest_exception_and_events /* No special register assumptions. */ exception_with_ints_disabled: - testb $3,XREGS_cs(%rsp) # interrupts disabled outside Xen? + testb $3,UREGS_cs(%rsp) # interrupts disabled outside Xen? jnz FATAL_exception_with_ints_disabled movq %rsp,%rdi call search_pre_exception_table testq %rax,%rax # no fixup code for faulting EIP? jz FATAL_exception_with_ints_disabled - movq %rax,XREGS_rip(%rsp) - subq $8,XREGS_rsp(%rsp) # add ec/ev to previous stack frame - testb $15,XREGS_rsp(%rsp) # return %rsp is now aligned? + movq %rax,UREGS_rip(%rsp) + subq $8,UREGS_rsp(%rsp) # add ec/ev to previous stack frame + testb $15,UREGS_rsp(%rsp) # return %rsp is now aligned? jz 1f # then there is a pad quadword already movq %rsp,%rsi subq $8,%rsp movq %rsp,%rdi - movq $XREGS_kernel_sizeof/8,%rcx + movq $UREGS_kernel_sizeof/8,%rcx rep; movsq # make room for ec/ev -1: movq XREGS_error_code(%rsp),%rax # ec/ev - movq %rax,XREGS_kernel_sizeof(%rsp) +1: movq UREGS_error_code(%rsp),%rax # ec/ev + movq %rax,UREGS_kernel_sizeof(%rsp) jmp restore_all_xen # return to fixup code /* No special register assumptions. 
*/ FATAL_exception_with_ints_disabled: - movl XREGS_entry_vector(%rsp),%edi + movl UREGS_entry_vector(%rsp),%edi movq %rsp,%rsi call SYMBOL_NAME(fatal_trap) ud2 diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c index b93bc230d5..90b81bca02 100644 --- a/xen/arch/x86/x86_64/mm.c +++ b/xen/arch/x86/x86_64/mm.c @@ -253,19 +253,19 @@ long do_set_segment_base(unsigned int which, unsigned long base) switch ( which ) { case SEGBASE_FS: - ed->arch.user_ctxt.fs_base = base; + ed->arch.user_regs.fs_base = base; if ( wrmsr_user(MSR_FS_BASE, base, base>>32) ) ret = -EFAULT; break; case SEGBASE_GS_USER: - ed->arch.user_ctxt.gs_base_user = base; + ed->arch.user_regs.gs_base_user = base; if ( wrmsr_user(MSR_SHADOW_GS_BASE, base, base>>32) ) ret = -EFAULT; break; case SEGBASE_GS_KERNEL: - ed->arch.user_ctxt.gs_base_kernel = base; + ed->arch.user_regs.gs_base_kernel = base; if ( wrmsr_user(MSR_GS_BASE, base, base>>32) ) ret = -EFAULT; break; diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c index 3bfc85e97c..a81eea145a 100644 --- a/xen/arch/x86/x86_64/traps.c +++ b/xen/arch/x86/x86_64/traps.c @@ -24,9 +24,10 @@ static inline int kernel_text_address(unsigned long addr) void show_guest_stack(void) { int i; - execution_context_t *ec = get_execution_context(); - unsigned long *stack = (unsigned long *)ec->rsp; - printk("Guest RIP is %016lx\n ", ec->rip); + struct cpu_user_regs *regs = get_cpu_user_regs(); + unsigned long *stack = (unsigned long *)regs->rsp; + + printk("Guest RIP is %016lx\n ", regs->rip); for ( i = 0; i < kstack_depth_to_print; i++ ) { @@ -84,7 +85,7 @@ void show_stack(unsigned long *rsp) show_trace(rsp); } -void show_registers(struct xen_regs *regs) +void show_registers(struct cpu_user_regs *regs) { printk("CPU: %d\nEIP: %04lx:[<%016lx>] \nEFLAGS: %016lx\n", smp_processor_id(), 0xffff & regs->cs, regs->rip, regs->eflags); @@ -130,7 +131,7 @@ void show_page_walk(unsigned long addr) } asmlinkage void double_fault(void); -asmlinkage 
void do_double_fault(struct xen_regs *regs) +asmlinkage void do_double_fault(struct cpu_user_regs *regs) { /* Disable the NMI watchdog. It's useless now. */ watchdog_on = 0; diff --git a/xen/arch/x86/x86_emulate.c b/xen/arch/x86/x86_emulate.c index c7f752e776..c13e28de41 100644 --- a/xen/arch/x86/x86_emulate.c +++ b/xen/arch/x86/x86_emulate.c @@ -377,7 +377,7 @@ do{ __asm__ __volatile__ ( \ void * decode_register( - u8 modrm_reg, struct xen_regs *regs, int highbyte_regs) + u8 modrm_reg, struct cpu_user_regs *regs, int highbyte_regs) { void *p; @@ -417,7 +417,7 @@ decode_register( int x86_emulate_memop( - struct xen_regs *regs, + struct cpu_user_regs *regs, unsigned long cr2, struct x86_mem_emulator *ops, int mode) @@ -430,7 +430,7 @@ x86_emulate_memop( struct operand src, dst; /* Shadow copy of register state. Committed on successful emulation. */ - struct xen_regs _regs = *regs; + struct cpu_user_regs _regs = *regs; /* Legacy prefixes. */ for ( i = 0; i < 8; i++ ) diff --git a/xen/common/dom0_ops.c b/xen/common/dom0_ops.c index dcab2dd0d3..5634fc3936 100644 --- a/xen/common/dom0_ops.c +++ b/xen/common/dom0_ops.c @@ -21,7 +21,7 @@ extern long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op); extern void arch_getdomaininfo_ctxt( - struct exec_domain *, full_execution_context_t *); + struct exec_domain *, struct vcpu_guest_context *); static inline int is_free_domid(domid_t dom) { @@ -279,7 +279,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op) case DOM0_GETDOMAININFO: { - full_execution_context_t *c; + struct vcpu_guest_context *c; struct domain *d; struct exec_domain *ed; @@ -331,7 +331,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op) if ( op->u.getdomaininfo.ctxt != NULL ) { - if ( (c = xmalloc(full_execution_context_t)) == NULL ) + if ( (c = xmalloc(struct vcpu_guest_context)) == NULL ) { ret = -ENOMEM; put_domain(d); diff --git a/xen/common/domain.c b/xen/common/domain.c index 1184dbaf67..ea2ea364a5 100644 --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -231,7 
+231,7 @@ void domain_destruct(struct domain *d) int set_info_guest(struct domain *p, dom0_setdomaininfo_t *setdomaininfo) { int rc = 0; - full_execution_context_t *c = NULL; + struct vcpu_guest_context *c = NULL; unsigned long vcpu = setdomaininfo->exec_domain; struct exec_domain *ed; @@ -242,7 +242,7 @@ int set_info_guest(struct domain *p, dom0_setdomaininfo_t *setdomaininfo) !test_bit(EDF_CTRLPAUSE, &ed->ed_flags)) return -EINVAL; - if ( (c = xmalloc(full_execution_context_t)) == NULL ) + if ( (c = xmalloc(struct vcpu_guest_context)) == NULL ) return -ENOMEM; if ( copy_from_user(c, setdomaininfo->ctxt, sizeof(*c)) ) @@ -266,12 +266,12 @@ int set_info_guest(struct domain *p, dom0_setdomaininfo_t *setdomaininfo) * than domain 0. ie. the domains that are being built by the userspace dom0 * domain builder. */ -long do_boot_vcpu(unsigned long vcpu, full_execution_context_t *ctxt) +long do_boot_vcpu(unsigned long vcpu, struct vcpu_guest_context *ctxt) { struct domain *d = current->domain; struct exec_domain *ed; int rc = 0; - full_execution_context_t *c; + struct vcpu_guest_context *c; if ( (vcpu >= MAX_VIRT_CPUS) || (d->exec_domain[vcpu] != NULL) ) return -EINVAL; @@ -279,7 +279,7 @@ long do_boot_vcpu(unsigned long vcpu, full_execution_context_t *ctxt) if ( alloc_exec_domain_struct(d, vcpu) == NULL ) return -ENOMEM; - if ( (c = xmalloc(full_execution_context_t)) == NULL ) + if ( (c = xmalloc(struct vcpu_guest_context)) == NULL ) { rc = -ENOMEM; goto out; diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c index d5a83b30d7..ca43f2f3c3 100644 --- a/xen/common/keyhandler.c +++ b/xen/common/keyhandler.c @@ -36,7 +36,7 @@ static void keypress_softirq(void) (*h)(key); } -void handle_keypress(unsigned char key, struct xen_regs *regs) +void handle_keypress(unsigned char key, struct cpu_user_regs *regs) { irq_keyhandler_t *h; @@ -83,13 +83,13 @@ static void show_handlers(unsigned char key) key_table[i].desc); } -static void dump_registers(unsigned char key, struct 
xen_regs *regs) +static void dump_registers(unsigned char key, struct cpu_user_regs *regs) { printk("'%c' pressed -> dumping registers\n", key); show_registers(regs); } -static void halt_machine(unsigned char key, struct xen_regs *regs) +static void halt_machine(unsigned char key, struct cpu_user_regs *regs) { printk("'%c' pressed -> rebooting machine\n", key); machine_restart(NULL); @@ -125,9 +125,12 @@ static void do_task_queues(unsigned char key) printk("Notifying guest... %d/%d\n", d->id, ed->eid); printk("port %d/%d stat %d %d %d\n", VIRQ_DEBUG, ed->virq_to_evtchn[VIRQ_DEBUG], - test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], &d->shared_info->evtchn_pending[0]), - test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], &d->shared_info->evtchn_mask[0]), - test_bit(ed->virq_to_evtchn[VIRQ_DEBUG]>>5, &ed->vcpu_info->evtchn_pending_sel)); + test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], + &d->shared_info->evtchn_pending[0]), + test_bit(ed->virq_to_evtchn[VIRQ_DEBUG], + &d->shared_info->evtchn_mask[0]), + test_bit(ed->virq_to_evtchn[VIRQ_DEBUG]>>5, + &ed->vcpu_info->evtchn_pending_sel)); send_guest_virq(ed, VIRQ_DEBUG); } } @@ -147,7 +150,7 @@ extern void perfc_printall(unsigned char key); extern void perfc_reset(unsigned char key); #endif -void do_debug_key(unsigned char key, struct xen_regs *regs) +void do_debug_key(unsigned char key, struct cpu_user_regs *regs) { (void)debugger_trap_fatal(0xf001, regs); nop(); /* Prevent the compiler doing tail call diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c index 5277d7b3a9..471fa18cb3 100644 --- a/xen/drivers/char/console.c +++ b/xen/drivers/char/console.c @@ -260,7 +260,7 @@ static void switch_serial_input(void) } } -static void __serial_rx(unsigned char c, struct xen_regs *regs) +static void __serial_rx(unsigned char c, struct cpu_user_regs *regs) { if ( xen_rx ) { @@ -274,7 +274,7 @@ static void __serial_rx(unsigned char c, struct xen_regs *regs) } } -static void serial_rx(unsigned char c, struct xen_regs *regs) +static void 
serial_rx(unsigned char c, struct cpu_user_regs *regs) { static int switch_code_count = 0; diff --git a/xen/drivers/char/serial.c b/xen/drivers/char/serial.c index 8db47dc903..ba58bc8a16 100644 --- a/xen/drivers/char/serial.c +++ b/xen/drivers/char/serial.c @@ -105,7 +105,7 @@ static struct uart com[2] = { * PRIVATE FUNCTIONS */ -static void uart_rx(struct uart *uart, struct xen_regs *regs) +static void uart_rx(struct uart *uart, struct cpu_user_regs *regs) { unsigned char c; @@ -132,7 +132,7 @@ static void uart_rx(struct uart *uart, struct xen_regs *regs) } static void serial_interrupt( - int irq, void *dev_id, struct xen_regs *regs) + int irq, void *dev_id, struct cpu_user_regs *regs) { uart_rx((struct uart *)dev_id, regs); } diff --git a/xen/include/asm-ia64/debugger.h b/xen/include/asm-ia64/debugger.h index 459b7c2695..e933b11487 100644 --- a/xen/include/asm-ia64/debugger.h +++ b/xen/include/asm-ia64/debugger.h @@ -26,13 +26,13 @@ /* The main trap handlers use these helper macros which include early bail. 
*/ static inline int debugger_trap_entry( - unsigned int vector, struct xen_regs *regs) + unsigned int vector, struct cpu_user_regs *regs) { return 0; } static inline int debugger_trap_fatal( - unsigned int vector, struct xen_regs *regs) + unsigned int vector, struct cpu_user_regs *regs) { return 0; } diff --git a/xen/include/asm-ia64/domain.h b/xen/include/asm-ia64/domain.h index 7a5d1166a3..6425368f51 100644 --- a/xen/include/asm-ia64/domain.h +++ b/xen/include/asm-ia64/domain.h @@ -6,7 +6,7 @@ extern void arch_do_createdomain(struct exec_domain *); extern int arch_final_setup_guestos( - struct exec_domain *, full_execution_context_t *); + struct exec_domain *, struct vcpu_guest_context *); extern void domain_relinquish_resources(struct domain *); diff --git a/xen/include/asm-ia64/regs.h b/xen/include/asm-ia64/regs.h index e429039f1f..f3f803d24c 100644 --- a/xen/include/asm-ia64/regs.h +++ b/xen/include/asm-ia64/regs.h @@ -1,2 +1,2 @@ #include -#define xen_regs pt_regs +#define cpu_user_regs pt_regs diff --git a/xen/include/asm-x86/apic.h b/xen/include/asm-x86/apic.h index 403664ca37..61d3f8fe47 100644 --- a/xen/include/asm-x86/apic.h +++ b/xen/include/asm-x86/apic.h @@ -74,10 +74,10 @@ extern void sync_Arb_IDs (void); extern void init_bsp_APIC (void); extern void setup_local_APIC (void); extern void init_apic_mappings (void); -extern void smp_local_timer_interrupt (struct xen_regs * regs); +extern void smp_local_timer_interrupt (struct cpu_user_regs * regs); extern void setup_APIC_clocks (void); extern void setup_apic_nmi_watchdog (void); -extern void nmi_watchdog_tick (struct xen_regs * regs); +extern void nmi_watchdog_tick (struct cpu_user_regs * regs); extern void touch_nmi_watchdog(void); extern int APIC_init_uniprocessor (void); extern void disable_APIC_timer(void); diff --git a/xen/include/asm-x86/debugger.h b/xen/include/asm-x86/debugger.h index 807e29ea06..dbd9acb0ed 100644 --- a/xen/include/asm-x86/debugger.h +++ b/xen/include/asm-x86/debugger.h @@ 
-38,11 +38,11 @@ #define DEBUGGER_trap_fatal(_v, _r) \ if ( debugger_trap_fatal(_v, _r) ) return EXCRET_fault_fixed; -int call_with_registers(int (*f)(struct xen_regs *r)); +int call_with_registers(int (*f)(struct cpu_user_regs *r)); #if defined(CRASH_DEBUG) -extern int __trap_to_cdb(struct xen_regs *r); +extern int __trap_to_cdb(struct cpu_user_regs *r); #define debugger_trap_entry(_v, _r) (0) #define debugger_trap_fatal(_v, _r) __trap_to_cdb(_r) #define debugger_trap_immediate() call_with_registers(__trap_to_cdb) @@ -52,7 +52,7 @@ extern int __trap_to_cdb(struct xen_regs *r); #include static inline int debugger_trap_entry( - unsigned int vector, struct xen_regs *regs) + unsigned int vector, struct cpu_user_regs *regs) { struct exec_domain *ed = current; @@ -77,16 +77,16 @@ static inline int debugger_trap_entry( #elif 0 -extern int kdb_trap(int, int, struct xen_regs *); +extern int kdb_trap(int, int, struct cpu_user_regs *); static inline int debugger_trap_entry( - unsigned int vector, struct xen_regs *regs) + unsigned int vector, struct cpu_user_regs *regs) { return 0; } static inline int debugger_trap_fatal( - unsigned int vector, struct xen_regs *regs) + unsigned int vector, struct cpu_user_regs *regs) { return kdb_trap(vector, 0, regs); } diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h index 1d3872a48e..5f875d6e2f 100644 --- a/xen/include/asm-x86/domain.h +++ b/xen/include/asm-x86/domain.h @@ -78,7 +78,7 @@ struct arch_exec_domain struct i387_state i387; /* general user-visible register state */ - execution_context_t user_ctxt; + struct cpu_user_regs user_regs; void (*schedule_tail) (struct exec_domain *); diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h index d5079e2fda..ad8aab082b 100644 --- a/xen/include/asm-x86/processor.h +++ b/xen/include/asm-x86/processor.h @@ -191,7 +191,9 @@ extern void dodgy_tsc(void); /* * Generic CPUID function */ -static inline void cpuid(int op, unsigned int *eax, unsigned 
int *ebx, unsigned int *ecx, unsigned int *edx) +static inline void cpuid( + int op, unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx) { __asm__("cpuid" : "=a" (*eax), @@ -405,7 +407,7 @@ long set_fast_trap(struct exec_domain *p, int idx); #endif -extern int gpf_emulate_4gb(struct xen_regs *regs); +extern int gpf_emulate_4gb(struct cpu_user_regs *regs); extern void write_ptbase(struct exec_domain *ed); @@ -499,9 +501,9 @@ extern inline void prefetchw(const void *x) void show_guest_stack(); void show_trace(unsigned long *esp); void show_stack(unsigned long *esp); -void show_registers(struct xen_regs *regs); +void show_registers(struct cpu_user_regs *regs); void show_page_walk(unsigned long addr); -asmlinkage void fatal_trap(int trapnr, struct xen_regs *regs); +asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs); #endif /* !__ASSEMBLY__ */ diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h index 88ea9e6dac..d52708ac1e 100644 --- a/xen/include/asm-x86/shadow.h +++ b/xen/include/asm-x86/shadow.h @@ -63,7 +63,7 @@ extern void shadow_mode_init(void); extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc); -extern int shadow_fault(unsigned long va, struct xen_regs *regs); +extern int shadow_fault(unsigned long va, struct cpu_user_regs *regs); extern int shadow_mode_enable(struct domain *p, unsigned int mode); extern void shadow_invlpg(struct exec_domain *, unsigned long); extern struct out_of_sync_entry *shadow_mark_mfn_out_of_sync( diff --git a/xen/include/asm-x86/vmx.h b/xen/include/asm-x86/vmx.h index de79484963..334afcf6be 100644 --- a/xen/include/asm-x86/vmx.h +++ b/xen/include/asm-x86/vmx.h @@ -25,7 +25,7 @@ #include #include -extern void vmx_asm_vmexit_handler(struct xen_regs); +extern void vmx_asm_vmexit_handler(struct cpu_user_regs); extern void vmx_asm_do_resume(void); extern void vmx_asm_do_launch(void); extern void vmx_intr_assist(struct exec_domain *d); diff --git 
a/xen/include/asm-x86/vmx_platform.h b/xen/include/asm-x86/vmx_platform.h index 90cab0a0d1..d58a45da9d 100644 --- a/xen/include/asm-x86/vmx_platform.h +++ b/xen/include/asm-x86/vmx_platform.h @@ -73,7 +73,7 @@ struct instruction { struct mi_per_cpu_info { unsigned long mmio_target; - struct xen_regs *inst_decoder_regs; + struct cpu_user_regs *inst_decoder_regs; }; struct virutal_platform_def { @@ -85,7 +85,7 @@ struct virutal_platform_def { }; extern void handle_mmio(unsigned long, unsigned long); -extern int vmx_setup_platform(struct exec_domain *, execution_context_t *); +extern int vmx_setup_platform(struct exec_domain *, struct cpu_user_regs *); // XXX - think about this -- maybe use bit 30 of the mfn to signify an MMIO frame. #define mmio_space(gpa) (!VALID_MFN(phys_to_machine_mapping((gpa) >> PAGE_SHIFT))) diff --git a/xen/include/asm-x86/vmx_vmcs.h b/xen/include/asm-x86/vmx_vmcs.h index b972a17011..1a39bb7a02 100644 --- a/xen/include/asm-x86/vmx_vmcs.h +++ b/xen/include/asm-x86/vmx_vmcs.h @@ -65,8 +65,8 @@ void free_vmcs(struct vmcs_struct *); int load_vmcs(struct arch_vmx_struct *, u64); int store_vmcs(struct arch_vmx_struct *, u64); void dump_vmcs(void); -int construct_vmcs(struct arch_vmx_struct *, execution_context_t *, - full_execution_context_t *, int); +int construct_vmcs(struct arch_vmx_struct *, struct cpu_user_regs *, + struct vcpu_guest_context *, int); #define VMCS_USE_HOST_ENV 1 #define VMCS_USE_SEPARATE_ENV 0 diff --git a/xen/include/asm-x86/x86_32/asm_defns.h b/xen/include/asm-x86/x86_32/asm_defns.h index 8cdd9c5498..6782c794af 100644 --- a/xen/include/asm-x86/x86_32/asm_defns.h +++ b/xen/include/asm-x86/x86_32/asm_defns.h @@ -13,16 +13,16 @@ "pushl %edx;" \ "pushl %ecx;" \ "pushl %ebx;" \ - "testl $"STR(X86_EFLAGS_VM)","STR(XREGS_eflags)"(%esp);" \ + "testl $"STR(X86_EFLAGS_VM)","STR(UREGS_eflags)"(%esp);" \ "jz 2f;" \ "call setup_vm86_frame;" \ "jmp 3f;" \ - "2:testb $3,"STR(XREGS_cs)"(%esp);" \ + "2:testb $3,"STR(UREGS_cs)"(%esp);" \ "jz 
1f;" \ - "movl %ds,"STR(XREGS_ds)"(%esp);" \ - "movl %es,"STR(XREGS_es)"(%esp);" \ - "movl %fs,"STR(XREGS_fs)"(%esp);" \ - "movl %gs,"STR(XREGS_gs)"(%esp);" \ + "movl %ds,"STR(UREGS_ds)"(%esp);" \ + "movl %es,"STR(UREGS_es)"(%esp);" \ + "movl %fs,"STR(UREGS_fs)"(%esp);" \ + "movl %gs,"STR(UREGS_gs)"(%esp);" \ "3:" #define SAVE_ALL_NOSEGREGS(_reg) \ @@ -50,16 +50,16 @@ pushl %edx; \ pushl %ecx; \ pushl %ebx; \ - testl $X86_EFLAGS_VM,XREGS_eflags(%esp); \ + testl $X86_EFLAGS_VM,UREGS_eflags(%esp); \ jz 2f; \ call setup_vm86_frame; \ jmp 3f; \ - 2:testb $3,XREGS_cs(%esp); \ + 2:testb $3,UREGS_cs(%esp); \ jz 1f; \ - movl %ds,XREGS_ds(%esp); \ - movl %es,XREGS_es(%esp); \ - movl %fs,XREGS_fs(%esp); \ - movl %gs,XREGS_gs(%esp); \ + movl %ds,UREGS_ds(%esp); \ + movl %es,UREGS_es(%esp); \ + movl %fs,UREGS_fs(%esp); \ + movl %gs,UREGS_gs(%esp); \ 3: #define SAVE_ALL_NOSEGREGS(_reg) \ @@ -98,7 +98,7 @@ __asm__( \ #define BUILD_SMP_TIMER_INTERRUPT(x,v) XBUILD_SMP_TIMER_INTERRUPT(x,v) #define XBUILD_SMP_TIMER_INTERRUPT(x,v) \ -asmlinkage void x(struct xen_regs * regs); \ +asmlinkage void x(struct cpu_user_regs * regs); \ __asm__( \ "\n"__ALIGN_STR"\n" \ SYMBOL_NAME_STR(x) ":\n\t" \ diff --git a/xen/include/asm-x86/x86_32/current.h b/xen/include/asm-x86/x86_32/current.h index 9dbcb06f28..fe7db8a14c 100644 --- a/xen/include/asm-x86/x86_32/current.h +++ b/xen/include/asm-x86/x86_32/current.h @@ -5,7 +5,7 @@ struct domain; #define STACK_RESERVED \ - (sizeof(execution_context_t) + sizeof(struct domain *)) + (sizeof(struct cpu_user_regs) + sizeof(struct domain *)) static inline struct exec_domain *get_current(void) { @@ -23,13 +23,13 @@ static inline void set_current(struct exec_domain *ed) : : "r" (STACK_SIZE-4), "r" (ed) ); } -static inline execution_context_t *get_execution_context(void) +static inline struct cpu_user_regs *get_cpu_user_regs(void) { - execution_context_t *execution_context; + struct cpu_user_regs *cpu_user_regs; __asm__ ( "andl %%esp,%0; addl %2,%0" - : "=r" 
(execution_context) + : "=r" (cpu_user_regs) : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-STACK_RESERVED) ); - return execution_context; + return cpu_user_regs; } /* @@ -49,7 +49,7 @@ static inline unsigned long get_stack_bottom(void) #define reset_stack_and_jump(__fn) \ __asm__ __volatile__ ( \ "movl %0,%%esp; jmp "STR(__fn) \ - : : "r" (get_execution_context()) ) + : : "r" (get_cpu_user_regs()) ) #define schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed) diff --git a/xen/include/asm-x86/x86_32/regs.h b/xen/include/asm-x86/x86_32/regs.h index 0e2e8622a4..f2bdb3606e 100644 --- a/xen/include/asm-x86/x86_32/regs.h +++ b/xen/include/asm-x86/x86_32/regs.h @@ -16,6 +16,6 @@ ((_dpl) >= (VM86_MODE(_r) ? 3 : ((_r)->cs & 3))) /* Number of bytes of on-stack execution state to be context-switched. */ -#define CTXT_SWITCH_STACK_BYTES (sizeof(execution_context_t)) +#define CTXT_SWITCH_STACK_BYTES (sizeof(struct cpu_user_regs)) #endif diff --git a/xen/include/asm-x86/x86_64/asm_defns.h b/xen/include/asm-x86/x86_64/asm_defns.h index d5f0e5b6ad..429ea5cc9f 100644 --- a/xen/include/asm-x86/x86_64/asm_defns.h +++ b/xen/include/asm-x86/x86_64/asm_defns.h @@ -106,7 +106,7 @@ __asm__( \ #define BUILD_SMP_TIMER_INTERRUPT(x,v) XBUILD_SMP_TIMER_INTERRUPT(x,v) #define XBUILD_SMP_TIMER_INTERRUPT(x,v) \ -asmlinkage void x(struct xen_regs * regs); \ +asmlinkage void x(struct cpu_user_regs * regs); \ __asm__( \ "\n"__ALIGN_STR"\n" \ SYMBOL_NAME_STR(x) ":\n\t" \ diff --git a/xen/include/asm-x86/x86_64/current.h b/xen/include/asm-x86/x86_64/current.h index a75c1d2526..6910471466 100644 --- a/xen/include/asm-x86/x86_64/current.h +++ b/xen/include/asm-x86/x86_64/current.h @@ -5,7 +5,7 @@ struct domain; #define STACK_RESERVED \ - (sizeof(execution_context_t) + sizeof(struct domain *)) + (sizeof(struct cpu_user_regs) + sizeof(struct domain *)) static inline struct exec_domain *get_current(void) { @@ -23,13 +23,13 @@ static inline void set_current(struct exec_domain *ed) : : "r" (STACK_SIZE-8), "r" (ed) 
); } -static inline execution_context_t *get_execution_context(void) +static inline struct cpu_user_regs *get_cpu_user_regs(void) { - execution_context_t *execution_context; + struct cpu_user_regs *cpu_user_regs; __asm__( "andq %%rsp,%0; addq %2,%0" - : "=r" (execution_context) + : "=r" (cpu_user_regs) : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-STACK_RESERVED) ); - return execution_context; + return cpu_user_regs; } /* @@ -49,7 +49,7 @@ static inline unsigned long get_stack_bottom(void) #define reset_stack_and_jump(__fn) \ __asm__ __volatile__ ( \ "movq %0,%%rsp; jmp "STR(__fn) \ - : : "r" (get_execution_context()) ) + : : "r" (get_cpu_user_regs()) ) #define schedule_tail(_ed) ((_ed)->arch.schedule_tail)(_ed) diff --git a/xen/include/asm-x86/x86_64/regs.h b/xen/include/asm-x86/x86_64/regs.h index c476282f2d..38d31db1bb 100644 --- a/xen/include/asm-x86/x86_64/regs.h +++ b/xen/include/asm-x86/x86_64/regs.h @@ -17,6 +17,6 @@ /* Number of bytes of on-stack execution state to be context-switched. */ /* NB. Segment registers and bases are not saved/restored on x86/64 stack. 
*/ -#define CTXT_SWITCH_STACK_BYTES (offsetof(execution_context_t, es)) +#define CTXT_SWITCH_STACK_BYTES (offsetof(struct cpu_user_regs, es)) #endif diff --git a/xen/include/asm-x86/x86_emulate.h b/xen/include/asm-x86/x86_emulate.h index 79197fed9c..bfcc987f57 100644 --- a/xen/include/asm-x86/x86_emulate.h +++ b/xen/include/asm-x86/x86_emulate.h @@ -139,7 +139,7 @@ x86_emulate_write_std( unsigned long val, unsigned int bytes); -struct xen_regs; +struct cpu_user_regs; /* * x86_emulate_memop: Emulate an instruction that faulted attempting to @@ -152,7 +152,7 @@ struct xen_regs; */ extern int x86_emulate_memop( - struct xen_regs *regs, + struct cpu_user_regs *regs, unsigned long cr2, struct x86_mem_emulator *ops, int mode); @@ -164,6 +164,6 @@ x86_emulate_memop( */ extern void * decode_register( - u8 modrm_reg, struct xen_regs *regs, int highbyte_regs); + u8 modrm_reg, struct cpu_user_regs *regs, int highbyte_regs); #endif /* __X86_EMULATE_H__ */ diff --git a/xen/include/public/arch-ia64.h b/xen/include/public/arch-ia64.h index ff53be3d85..ee5e4217c9 100644 --- a/xen/include/public/arch-ia64.h +++ b/xen/include/public/arch-ia64.h @@ -22,7 +22,7 @@ typedef unsigned long cpureg_t; /* Full-sized register. */ typedef struct { -} PACKED execution_context_t; +} PACKED struct cpu_user_regs; /* * NB. This may become a 64-bit count with no shift. If this happens then the @@ -91,9 +91,9 @@ typedef struct { * The following is all CPU context. Note that the i387_ctxt block is filled * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. 
*/ -typedef struct { +typedef struct vcpu_guest_context { //unsigned long flags; -} PACKED full_execution_context_t; +} PACKED vcpu_guest_context_t; #endif /* !__ASSEMBLY__ */ diff --git a/xen/include/public/arch-x86_32.h b/xen/include/public/arch-x86_32.h index e76f967056..b5dd475a2a 100644 --- a/xen/include/public/arch-x86_32.h +++ b/xen/include/public/arch-x86_32.h @@ -97,8 +97,7 @@ typedef struct { memory_t address; /* 4: code address */ } PACKED trap_info_t; /* 8 bytes */ -typedef struct xen_regs -{ +typedef struct cpu_user_regs { u32 ebx; u32 ecx; u32 edx; @@ -117,7 +116,7 @@ typedef struct xen_regs u32 ds; u32 fs; u32 gs; -} PACKED execution_context_t; +} cpu_user_regs_t; typedef u64 tsc_timestamp_t; /* RDTSC timestamp */ @@ -125,12 +124,12 @@ typedef u64 tsc_timestamp_t; /* RDTSC timestamp */ * The following is all CPU context. Note that the i387_ctxt block is filled * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. */ -typedef struct { +typedef struct vcpu_guest_context { #define ECF_I387_VALID (1<<0) #define ECF_VMX_GUEST (1<<1) -#define ECF_IN_KERNEL (1<<2) +#define ECF_IN_KERNEL (1<<2) unsigned long flags; - execution_context_t cpu_ctxt; /* User-level CPU registers */ + cpu_user_regs_t user_regs; /* User-level CPU registers */ char fpu_ctxt[256]; /* User-level FPU registers */ trap_info_t trap_ctxt[256]; /* Virtual IDT */ unsigned int fast_trap_idx; /* "Fast trap" vector offset */ @@ -144,7 +143,7 @@ typedef struct { unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */ unsigned long failsafe_callback_eip; unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ -} PACKED full_execution_context_t; +} PACKED vcpu_guest_context_t; typedef struct { /* MFN of a table of MFNs that make up p2m table */ diff --git a/xen/include/public/arch-x86_64.h b/xen/include/public/arch-x86_64.h index 59a2d8086e..c20327d269 100644 --- a/xen/include/public/arch-x86_64.h +++ b/xen/include/public/arch-x86_64.h @@ -142,8 +142,7 @@ typedef struct { 
memory_t address; /* 8: code address */ } PACKED trap_info_t; /* 16 bytes */ -typedef struct xen_regs -{ +typedef struct cpu_user_regs { u64 r15; u64 r14; u64 r13; @@ -173,7 +172,7 @@ typedef struct xen_regs u64 fs_base; u64 gs_base_kernel; u64 gs_base_user; -} PACKED execution_context_t; +} cpu_user_regs_t; typedef u64 tsc_timestamp_t; /* RDTSC timestamp */ @@ -181,12 +180,12 @@ typedef u64 tsc_timestamp_t; /* RDTSC timestamp */ * The following is all CPU context. Note that the i387_ctxt block is filled * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used. */ -typedef struct { +typedef struct vcpu_guest_context { #define ECF_I387_VALID (1<<0) #define ECF_VMX_GUEST (1<<1) -#define ECF_IN_KERNEL (1<<2) +#define ECF_IN_KERNEL (1<<2) unsigned long flags; - execution_context_t cpu_ctxt; /* User-level CPU registers */ + cpu_user_regs_t user_regs; /* User-level CPU registers */ char fpu_ctxt[512]; /* User-level FPU registers */ trap_info_t trap_ctxt[256]; /* Virtual IDT */ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */ @@ -198,7 +197,7 @@ typedef struct { unsigned long failsafe_callback_eip; unsigned long syscall_callback_eip; unsigned long vm_assist; /* VMASST_TYPE_* bitmap */ -} PACKED full_execution_context_t; +} PACKED vcpu_guest_context_t; typedef struct { /* MFN of a table of MFNs that make up p2m table */ diff --git a/xen/include/public/dom0_ops.h b/xen/include/public/dom0_ops.h index 1086ae85d6..828b00f9fe 100644 --- a/xen/include/public/dom0_ops.h +++ b/xen/include/public/dom0_ops.h @@ -83,7 +83,7 @@ typedef struct { #define DOMFLAGS_SHUTDOWNMASK 255 /* DOMFLAGS_SHUTDOWN guest-supplied code. */ #define DOMFLAGS_SHUTDOWNSHIFT 16 u32 flags; - full_execution_context_t *ctxt; /* NB. IN/OUT variable. */ + vcpu_guest_context_t *ctxt; /* NB. IN/OUT variable. 
*/ memory_t tot_pages; memory_t max_pages; memory_t shared_info_frame; /* MFN of shared_info struct */ @@ -96,7 +96,7 @@ typedef struct { domid_t domain; u16 exec_domain; /* IN/OUT parameters */ - full_execution_context_t *ctxt; + vcpu_guest_context_t *ctxt; } dom0_setdomaininfo_t; #define DOM0_MSR 15 diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h index 3a37b59d4f..d7f135db2e 100644 --- a/xen/include/xen/domain.h +++ b/xen/include/xen/domain.h @@ -15,7 +15,7 @@ extern void arch_do_createdomain(struct exec_domain *ed); extern void arch_do_boot_vcpu(struct exec_domain *ed); extern int arch_set_info_guest( - struct exec_domain *d, full_execution_context_t *c); + struct exec_domain *d, struct vcpu_guest_context *c); extern void free_perdomain_pt(struct domain *d); diff --git a/xen/include/xen/irq.h b/xen/include/xen/irq.h index fd00e5e5eb..6c77460f41 100644 --- a/xen/include/xen/irq.h +++ b/xen/include/xen/irq.h @@ -8,7 +8,7 @@ struct irqaction { - void (*handler)(int, void *, struct xen_regs *); + void (*handler)(int, void *, struct cpu_user_regs *); const char *name; void *dev_id; }; @@ -63,7 +63,7 @@ extern int setup_irq(unsigned int, struct irqaction *); extern void free_irq(unsigned int); extern hw_irq_controller no_irq_type; -extern void no_action(int cpl, void *dev_id, struct xen_regs *regs); +extern void no_action(int cpl, void *dev_id, struct cpu_user_regs *regs); struct domain; struct exec_domain; diff --git a/xen/include/xen/keyhandler.h b/xen/include/xen/keyhandler.h index e09cdacc9f..112ab3475b 100644 --- a/xen/include/xen/keyhandler.h +++ b/xen/include/xen/keyhandler.h @@ -23,11 +23,11 @@ extern void register_keyhandler( * synchronously in hard-IRQ context with interrupts disabled. The @regs * callback parameter points at the interrupted register context. 
*/ -typedef void irq_keyhandler_t(unsigned char key, struct xen_regs *regs); +typedef void irq_keyhandler_t(unsigned char key, struct cpu_user_regs *regs); extern void register_irq_keyhandler( unsigned char key, irq_keyhandler_t *handler, char *desc); /* Inject a keypress into the key-handling subsystem. */ -extern void handle_keypress(unsigned char key, struct xen_regs *regs); +extern void handle_keypress(unsigned char key, struct cpu_user_regs *regs); #endif /* __XEN_KEYHANDLER_H__ */ diff --git a/xen/include/xen/serial.h b/xen/include/xen/serial.h index 4d33ddb17b..01e5236dd5 100644 --- a/xen/include/xen/serial.h +++ b/xen/include/xen/serial.h @@ -28,7 +28,7 @@ void serial_init_stage2(void); int parse_serial_handle(char *conf); /* Register a character-receive hook on the specified COM port. */ -typedef void (*serial_rx_fn)(unsigned char, struct xen_regs *); +typedef void (*serial_rx_fn)(unsigned char, struct cpu_user_regs *); void serial_set_rx_handler(int handle, serial_rx_fn fn); /* Transmit a single character via the specified COM port. */ -- 2.30.2